/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "umc_v6_1.h"
#include "amdgpu_ras.h"
#include "amdgpu.h"

#include "rsmu/rsmu_0_0_2_offset.h"
#include "rsmu/rsmu_0_0_2_sh_mask.h"
#include "umc/umc_6_1_1_offset.h"
#include "umc/umc_6_1_1_sh_mask.h"
#include "umc/umc_6_1_2_offset.h"

#define UMC_6_INST_DIST			0x40000

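/* per-(UMC instance, channel instance) channel index, used when translating a
 * UMC channel address into a SoC physical address
 */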
const uint32_t
	umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = {
		{2, 18, 11, 27},	{4, 20, 13, 29},
		{1, 17, 8, 24},		{7, 23, 14, 30},
		{10, 26, 3, 19},	{12, 28, 5, 21},
		{9, 25, 0, 16},		{15, 31, 6, 22}
};

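/* turn on the RSMU UMC index mode by setting RSMU_UMC_INDEX_MODE_EN */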
static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
{
	uint32_t rsmu_umc_addr, rsmu_umc_val;

	rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
	rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);

	rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
			RSMU_UMC_INDEX_MODE_EN, 1);

	WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
}

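/* turn off the RSMU UMC index mode by clearing RSMU_UMC_INDEX_MODE_EN, so the
 * per-channel UMC registers can be accessed by explicit offset
 */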
static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
{
	uint32_t rsmu_umc_addr, rsmu_umc_val;

	rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
	rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);

	rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
			RSMU_UMC_INDEX_MODE_EN, 0);

	WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
}

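/* return the current RSMU_UMC_INDEX_MODE_EN setting */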
static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
{
	uint32_t rsmu_umc_addr, rsmu_umc_val;

	rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
	rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);

	return REG_GET_FIELD(rsmu_umc_val,
			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
			RSMU_UMC_INDEX_MODE_EN);
}

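/* compute the register offset of a channel from its UMC instance and channel instance */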
static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
					    uint32_t umc_inst,
					    uint32_t ch_inst)
{
	return adev->umc.channel_offs * ch_inst + UMC_6_INST_DIST * umc_inst;
}

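/* reset the ECC error counters of the lower and higher chips of one channel */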
static void umc_v6_1_clear_error_count_per_channel(struct amdgpu_device *adev,
					uint32_t umc_reg_offset)
{
	uint32_t ecc_err_cnt_addr;
	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0,
					mmUMCCH0_0_EccErrCntSel_ARCT);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0,
					mmUMCCH0_0_EccErrCnt_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0,
					mmUMCCH0_0_EccErrCntSel);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0,
					mmUMCCH0_0_EccErrCnt);
	}

	/* select the lower chip */
	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
					umc_reg_offset) * 4);
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
					UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 0);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
			ecc_err_cnt_sel);

	/* clear lower chip error count */
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V6_1_CE_CNT_INIT);

	/* select the higher chip */
	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
					umc_reg_offset) * 4);
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
					UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 1);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
			ecc_err_cnt_sel);

	/* clear higher chip error count */
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V6_1_CE_CNT_INIT);
}

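/* reset the ECC error counters of every channel on every UMC instance */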
static void umc_v6_1_clear_error_count(struct amdgpu_device *adev)
{
	uint32_t umc_inst        = 0;
	uint32_t ch_inst         = 0;
	uint32_t umc_reg_offset  = 0;
	uint32_t rsmu_umc_index_state =
				umc_v6_1_get_umc_index_mode_state(adev);

	if (rsmu_umc_index_state)
		umc_v6_1_disable_umc_index_mode(adev);

	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		umc_reg_offset = get_umc_6_reg_offset(adev,
						umc_inst,
						ch_inst);

		umc_v6_1_clear_error_count_per_channel(adev,
						umc_reg_offset);
	}

	if (rsmu_umc_index_state)
		umc_v6_1_enable_umc_index_mode(adev);
}

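/* accumulate the correctable error count of one channel into *error_count:
 * read the ECC error counters of both chips and check MCUMC_STATUS for an
 * SRAM correctable error
 */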
static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
						   uint32_t umc_reg_offset,
						   unsigned long *error_count)
{
	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
	uint32_t ecc_err_cnt, ecc_err_cnt_addr;
	uint64_t mc_umc_status;
	uint32_t mc_umc_status_addr;

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
	}

	/* select the lower chip and check the error count */
	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 0);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);

	ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
	*error_count +=
		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
		 UMC_V6_1_CE_CNT_INIT);

	/* select the higher chip and check the err counter */
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 1);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);

	ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
	*error_count +=
		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
		 UMC_V6_1_CE_CNT_INIT);

	/* check for SRAM correctable error;
	 * MCUMC_STATUS is a 64-bit register
	 */
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
		*error_count += 1;
}

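/* count an uncorrectable error for one channel if MCUMC_STATUS is valid and
 * any of Deferred, UECC, PCC, UC or TCC is set
 */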
static void umc_v6_1_query_uncorrectable_error_count(struct amdgpu_device *adev,
						     uint32_t umc_reg_offset,
						     unsigned long *error_count)
{
	uint64_t mc_umc_status;
	uint32_t mc_umc_status_addr;

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
	}

	/* check the MCUMC_STATUS */
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
	if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
		*error_count += 1;
}

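/* query the correctable and uncorrectable error counts of all channels,
 * accumulate them into ras_error_status and reset the hardware counters
 */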
static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
					   void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	uint32_t umc_inst        = 0;
	uint32_t ch_inst         = 0;
	uint32_t umc_reg_offset  = 0;

	uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);

	if (rsmu_umc_index_state)
		umc_v6_1_disable_umc_index_mode(adev);

	if ((adev->asic_type == CHIP_ARCTURUS) &&
		amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
		DRM_WARN("Failed to disable DF-Cstate.\n");

	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		umc_reg_offset = get_umc_6_reg_offset(adev,
						      umc_inst,
						      ch_inst);

		umc_v6_1_query_correctable_error_count(adev,
						       umc_reg_offset,
						       &(err_data->ce_count));
		umc_v6_1_query_uncorrectable_error_count(adev,
							 umc_reg_offset,
							 &(err_data->ue_count));
	}

	if ((adev->asic_type == CHIP_ARCTURUS) &&
		amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
		DRM_WARN("Failed to enable DF-Cstate.\n");

	if (rsmu_umc_index_state)
		umc_v6_1_enable_umc_index_mode(adev);

	umc_v6_1_clear_error_count(adev);
}

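/* read the error address of one channel from MCUMC_ADDRT0, translate it to a
 * SoC physical address, record uncorrectable errors in err_data and clear the
 * channel's MCUMC_STATUS register
 */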
static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
					 struct ras_err_data *err_data,
					 uint32_t umc_reg_offset,
					 uint32_t ch_inst,
					 uint32_t umc_inst)
{
	uint32_t lsb, mc_umc_status_addr;
	uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
	struct eeprom_table_record *err_rec;
	uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
		mc_umc_addrt0 =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
		mc_umc_addrt0 =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
	}

	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);

	if (mc_umc_status == 0)
		return;

	if (!err_data->err_addr) {
		/* clear umc status */
		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
		return;
	}

	err_rec = &err_data->err_addr[err_data->err_addr_cnt];

	/* calculate error address if ue/ce error is detected */
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {

		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
		/* the lowest lsb bits should be ignored */
		lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB);
		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
		err_addr &= ~((0x1ULL << lsb) - 1);

		/* translate umc channel address to soc pa, 3 parts are included */
		retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
				ADDR_OF_256B_BLOCK(channel_index) |
				OFFSET_IN_256B_BLOCK(err_addr);

		/* we only save ue error information currently, ce is skipped */
		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
				== 1) {
			err_rec->address = err_addr;
			/* page frame address is saved */
			err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
			err_rec->ts = (uint64_t)ktime_get_real_seconds();
			err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
			err_rec->cu = 0;
			err_rec->mem_channel = channel_index;
			err_rec->mcumc_id = umc_inst;

			err_data->err_addr_cnt++;
		}
	}

	/* clear umc status */
	WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
}

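/* collect the error addresses of all channels into ras_error_status */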
static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	uint32_t umc_inst        = 0;
	uint32_t ch_inst         = 0;
	uint32_t umc_reg_offset  = 0;

	uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);

	if (rsmu_umc_index_state)
		umc_v6_1_disable_umc_index_mode(adev);

	if ((adev->asic_type == CHIP_ARCTURUS) &&
		amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
		DRM_WARN("Failed to disable DF-Cstate.\n");

	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		umc_reg_offset = get_umc_6_reg_offset(adev,
						      umc_inst,
						      ch_inst);

		umc_v6_1_query_error_address(adev,
					     err_data,
					     umc_reg_offset,
					     ch_inst,
					     umc_inst);
	}

	if ((adev->asic_type == CHIP_ARCTURUS) &&
		amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
		DRM_WARN("Failed to enable DF-Cstate.\n");

	if (rsmu_umc_index_state)
		umc_v6_1_enable_umc_index_mode(adev);
}

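/* per-channel counter init: route the correctable error interrupt to the APIC
 * and preset the ECC error counters of both chips to UMC_V6_1_CE_CNT_INIT
 */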
static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
					      uint32_t umc_reg_offset)
{
	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
	uint32_t ecc_err_cnt_addr;

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
	}

	/* select the lower chip */
	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 0);
	/* set ce error interrupt type to APIC based interrupt */
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrInt, 0x1);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
	/* set error count to initial value */
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);

	/* select the higher chip and set its error count to the initial value */
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 1);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
}

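/* initialize the ECC error counters of every channel on every UMC instance */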
static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
{
	uint32_t umc_inst        = 0;
	uint32_t ch_inst         = 0;
	uint32_t umc_reg_offset  = 0;

	uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);

	if (rsmu_umc_index_state)
		umc_v6_1_disable_umc_index_mode(adev);

	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		umc_reg_offset = get_umc_6_reg_offset(adev,
						      umc_inst,
						      ch_inst);

		umc_v6_1_err_cnt_init_per_channel(adev, umc_reg_offset);
	}

	if (rsmu_umc_index_state)
		umc_v6_1_enable_umc_index_mode(adev);
}

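/* UMC 6.1 RAS callbacks exported to the amdgpu UMC layer */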
const struct amdgpu_umc_funcs umc_v6_1_funcs = {
	.err_cnt_init = umc_v6_1_err_cnt_init,
	.ras_late_init = amdgpu_umc_ras_late_init,
	.query_ras_error_count = umc_v6_1_query_ras_error_count,
	.query_ras_error_address = umc_v6_1_query_ras_error_address,
};
473