/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "mmhub_v1_0.h"

#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "mmhub/mmhub_1_0_default.h"
#include "vega10_enum.h"
#include "soc15.h"
#include "soc15_common.h"

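/*
 * On Raven, DAGB0_CNTL_MISC2 sits at a different offset than on the other
 * SOC15 parts, so define it locally; the clock gating code below is the
 * only user.
 */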
#define mmDAGB0_CNTL_MISC2_RV 0x008f
#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0

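/*
 * MC_VM_FB_LOCATION_{BASE,TOP} hold bits [47:24] of the framebuffer
 * aperture, i.e. base and top are programmed in 16 MB units.
 */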
static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
	u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);

	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
	top <<= 24;

	adev->gmc.fb_start = base;
	adev->gmc.fb_end = top;

	return base;
}

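/*
 * Program the page table base address for @vmid; the per-context registers
 * are laid out at a fixed stride (ctx_addr_distance) from the context 0
 * registers.
 */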
static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    hub->ctx_addr_distance * vmid,
			    lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    hub->ctx_addr_distance * vmid,
			    upper_32_bits(page_table_base));
}

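/*
 * Point context 0 at the GART page table and program the aperture it
 * translates; the start/end registers take GPU addresses in 4 KB pages,
 * split as bits [43:12] (LO32) and [47:44] (HI32).
 */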
static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}

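/*
 * Program the AGP and system apertures.  The AGP registers take 16 MB
 * granularity (>> 24) and the system aperture registers take 256 KB
 * granularity (>> 18); the system aperture must cover both the FB and
 * AGP ranges.
 */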
static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/* Program the AGP BAR */
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

	/* Program the system aperture low logical page number. */
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

	if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
			       AMD_APU_IS_RENOIR |
			       AMD_APU_IS_GREEN_SARDINE))
		/*
		 * Raven2 has a HW issue that makes it unable to use the
		 * vram which is beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
		 * As a workaround, increase the system aperture high
		 * address by one to get rid of the VM fault and hardware
		 * hang.
		 */
		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max((adev->gmc.fb_end >> 18) + 0x1,
				 adev->gmc.agp_end >> 18));
	else
		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

	if (amdgpu_sriov_vf(adev))
		return;

	/* Set default page address. */
	value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}

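/*
 * Configure the MC L1 TLB: enable it together with the "advanced driver
 * model" (GPU virtual addressing rather than physical, as this driver
 * uses it), and leave MTYPE at UC per the emulation note below.
 */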
static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

	WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
}

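/*
 * L2 page table cache setup.  The bank select and bigk fragment size
 * values differ depending on whether the extra "translate further"
 * page table level is in use.
 */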
static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	/* XXX for emulation, refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
			    0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);

	tmp = mmVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);

	tmp = mmVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL4, tmp);
}

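/*
 * Context 0 is the system (kernel) domain: page table depth 0 selects a
 * flat, single-level GART walk, and retry faults are disabled on it.
 */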
static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
}

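/*
 * Disable the identity aperture by programming an empty range: the low
 * address is set above the high address, so no access can ever match.
 */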
static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return;

	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(MMHUB, 0,
		     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
	WREG32_SOC15(MMHUB, 0,
		     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
		     0);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
		     0);
}

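/*
 * Mirror the context 0 GART configuration into the VM_L2_SAW registers;
 * judging by the caller, this copy is only needed when an ISP IP block
 * is present.
 */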
static void mmhub_v1_0_init_saw(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
	uint32_t tmp;

	/* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 */
	WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
		     lower_32_bits(pt_base >> 12));

	/* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 */
	WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
		     upper_32_bits(pt_base >> 12));

	/* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 */
	WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));

	/* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 */
	WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	/* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 */
	WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));

	/* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 */
	WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));

	/* Program SAW CONTEXT0 CNTL */
	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_CNTL);
	tmp |= 1 << CONTEXT0_CNTL_ENABLE_OFFSET;
	tmp &= ~(3 << CONTEXT0_CNTL_PAGE_TABLE_DEPTH_OFFSET);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_CNTL, tmp);

	/* Disable all contexts except context 0 */
	tmp = 0xfffe;
	WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXTS_DISABLE, tmp);

	/* Program SAW CNTL4 */
	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CNTL4);
	tmp |= 1 << VMC_TAP_PDE_REQUEST_SNOOP_OFFSET;
	tmp |= 1 << VMC_TAP_PTE_REQUEST_SNOOP_OFFSET;
	WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CNTL4, tmp);
}

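/*
 * Program contexts 1-15 (the per-process VMIDs): full VM address range,
 * all protection faults enabled, and retry behaviour per
 * adev->gmc.noretry.  The depth/block-size adjustment accounts for the
 * optional extra "translate further" page table level.
 */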
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	unsigned num_level, block_size;
	uint32_t tmp;
	int i;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i * hub->ctx_distance);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    block_size);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}

	if (amdgpu_ip_version(adev, ISP_HWIP, 0))
		mmhub_v1_0_init_saw(adev);
}

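/*
 * Default all 18 invalidation engines to cover the entire address range,
 * so an invalidation request flushes everything.
 */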
static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    i * hub->eng_addr_distance, 0xffffffff);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    i * hub->eng_addr_distance, 0x1f);
	}
}

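/* MMHUB powergating is requested from the SMU via the GMC IP block. */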
static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
		amdgpu_dpm_set_powergating_by_smu(adev,
						  AMD_IP_BLOCK_TYPE_GMC,
						  enable, 0);
}

static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP are zero for a VF because they
		 * are VF copy registers that the vbios post doesn't program;
		 * under SRIOV the driver needs to program them.
		 */
		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
	mmhub_v1_0_init_gart_aperture_regs(adev);
	mmhub_v1_0_init_system_aperture_regs(adev);
	mmhub_v1_0_init_tlb_regs(adev);
	mmhub_v1_0_init_cache_regs(adev);

	mmhub_v1_0_enable_system_domain(adev);
	mmhub_v1_0_disable_identity_aperture(adev);
	mmhub_v1_0_setup_vmid_config(adev);
	mmhub_v1_0_program_invalidation(adev);

	return 0;
}

static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < AMDGPU_NUM_VMID; i++)
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp,
			    MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL,
			    0);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);

	if (!amdgpu_sriov_vf(adev)) {
		/* Setup L2 cache */
		tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
		WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
		WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
	}
}

/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_RETRY_FAULT, 1);
	}

	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

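/*
 * Cache the per-hub register offsets and strides that the common GMC/VM
 * code uses to drive this hub.
 */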
static void mmhub_v1_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);

	hub->ctx_distance = mmVM_CONTEXT1_CNTL - mmVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmVM_INVALIDATE_ENG1_REQ - mmVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}

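/*
 * Medium-grain clock gating is toggled through ATC_L2_MISC_CG plus the
 * per-DAGB CG disable masks.  Raven only programs DAGB0, and at the
 * Raven-specific offset defined at the top of this file.
 */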
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data, def1, data1, def2 = 0, data2 = 0;

	def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	if (adev->asic_type != CHIP_RAVEN) {
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
		def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
	} else
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data |= ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	} else {
		data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);

	if (def1 != data1) {
		if (adev->asic_type != CHIP_RAVEN)
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
		else
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
	}

	if (adev->asic_type != CHIP_RAVEN && def2 != data2)
		WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
}

static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
		data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
}

static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		mmhub_v1_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		mmhub_v1_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
	int data, data1;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

	/* AMD_CG_SUPPORT_MC_MGCG */
	if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

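/*
 * RAS error counter layout (Vega20 register variants): each entry maps an
 * EA memory instance to its SEC (correctable) and DED (uncorrectable)
 * count fields.  Parity-only (SED) memories report through the SEC slot
 * and leave the DED mask/shift at zero.
 */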
static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = {
	{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	}
};

static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
};

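/*
 * Walk the field table for @reg and accumulate any non-zero SEC/DED
 * counts into *sec_count and *ded_count, logging each affected sub-block.
 */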
static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev,
					  const struct soc15_reg_entry *reg,
					  uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
{
	uint32_t i;
	uint32_t sec_cnt, ded_cnt;

	for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) {
		if (mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset)
			continue;

		sec_cnt = (value &
				mmhub_v1_0_ras_fields[i].sec_count_mask) >>
				mmhub_v1_0_ras_fields[i].sec_count_shift;
		if (sec_cnt) {
			dev_info(adev->dev,
				 "MMHUB SubBlock %s, SEC %d\n",
				 mmhub_v1_0_ras_fields[i].name,
				 sec_cnt);
			*sec_count += sec_cnt;
		}

		ded_cnt = (value &
				mmhub_v1_0_ras_fields[i].ded_count_mask) >>
				mmhub_v1_0_ras_fields[i].ded_count_shift;
		if (ded_cnt) {
			dev_info(adev->dev,
				 "MMHUB SubBlock %s, DED %d\n",
				 mmhub_v1_0_ras_fields[i].name,
				 ded_cnt);
			*ded_count += ded_cnt;
		}
	}

	return 0;
}

static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint32_t sec_count = 0, ded_count = 0;
	uint32_t i;
	uint32_t reg_value;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) {
		reg_value =
			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
		if (reg_value)
			mmhub_v1_0_get_ras_error_count(adev,
				&mmhub_v1_0_edc_cnt_regs[i],
				reg_value, &sec_count, &ded_count);
	}

	err_data->ce_count += sec_count;
	err_data->ue_count += ded_count;
}

static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	/* read back edc counter registers to reset the counters to 0 */
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
		for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
	}
}

struct amdgpu_ras_block_hw_ops mmhub_v1_0_ras_hw_ops = {
	.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
	.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
};

struct amdgpu_mmhub_ras mmhub_v1_0_ras = {
	.ras_block = {
		.hw_ops = &mmhub_v1_0_ras_hw_ops,
	},
};

const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
	.get_fb_location = mmhub_v1_0_get_fb_location,
	.init = mmhub_v1_0_init,
	.gart_enable = mmhub_v1_0_gart_enable,
	.set_fault_enable_default = mmhub_v1_0_set_fault_enable_default,
	.gart_disable = mmhub_v1_0_gart_disable,
	.set_clockgating = mmhub_v1_0_set_clockgating,
	.get_clockgating = mmhub_v1_0_get_clockgating,
	.setup_vm_pt_regs = mmhub_v1_0_setup_vm_pt_regs,
	.update_power_gating = mmhub_v1_0_update_power_gating,
};