/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "umc_v12_0.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "umc/umc_12_0_0_offset.h"
#include "umc/umc_12_0_0_sh_mask.h"
#include "mp/mp_13_0_6_sh_mask.h"

#define MAX_ECC_NUM_PER_RETIREMENT  32
#define DELAYED_TIME_FOR_GPU_RESET  1000  //ms

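/*
 * Translate a (node, UMC, channel) instance triple into the register
 * address offset for that channel. The flat channel index is regrouped
 * into blocks of four channels per UMC instance to match the register
 * layout; node 0 needs no cross-node offset.
 */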
static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev,
					    uint32_t node_inst,
					    uint32_t umc_inst,
					    uint32_t ch_inst)
{
	uint32_t index = umc_inst * adev->umc.channel_inst_num + ch_inst;
	uint64_t cross_node_offset = (node_inst == 0) ? 0 : UMC_V12_0_CROSS_NODE_OFFSET;

	umc_inst = index / 4;
	ch_inst = index % 4;

	return adev->umc.channel_offs * ch_inst + UMC_V12_0_INST_DIST * umc_inst +
		UMC_V12_0_NODE_DIST * node_inst + cross_node_offset;
}

static int umc_v12_0_reset_error_count_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint64_t odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	/* clear error count */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V12_0_CE_CNT_INIT);

	return 0;
}

static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_reset_error_count_per_channel, NULL);
}

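/*
 * Error classification based on MCA_UMC_UMC0_MCUMC_STATUST0: a valid status
 * with the Deferred bit set is treated as a deferred (poison) error when
 * poison mode is supported; otherwise PCC/UC/TCC mark an uncorrectable
 * error, and CECC (or UECC with UC clear) marks a correctable one.
 */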
bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	dev_dbg(adev->dev,
		"MCA_UMC_STATUS(0x%llx): Val:%llu, Poison:%llu, Deferred:%llu, PCC:%llu, UC:%llu, TCC:%llu\n",
		mc_umc_status,
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC)
	);

	return (amdgpu_ras_is_poison_mode_supported(adev) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1));
}

bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
}

bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0) ||
		/* Identify data parity error in replay mode */
		((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) &&
		!(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
}

static void umc_v12_0_query_error_count_per_type(struct amdgpu_device *adev,
						   uint64_t umc_reg_offset,
						   unsigned long *error_count,
						   check_error_type_func error_type_func)
{
	uint64_t mc_umc_status;
	uint64_t mc_umc_status_addr;

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	/* Check MCUMC_STATUS */
	mc_umc_status =
		RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (error_type_func(adev, mc_umc_status))
		*error_count += 1;
}

static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	unsigned long ue_count = 0, ce_count = 0, de_count = 0;

	/* NOTE: node_inst has already been remapped through adev->umc.active_mask
	 * and falls in the range [0, 3], so it can be used as the die ID directly.
	 */
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = node_inst,
	};

	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					    &ce_count, umc_v12_0_is_correctable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					    &ue_count, umc_v12_0_is_uncorrectable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					    &de_count, umc_v12_0_is_deferred_error);

	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
	amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, de_count);

	return 0;
}

static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
					   void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_count, ras_error_status);

	umc_v12_0_reset_error_count(adev);
}

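/*
 * Convert an MCA error address into SoC physical addresses through the PSP
 * RAS TA (MCA_TO_PA), then walk all combinations of the [C4 C3 C2] column
 * bits plus the R13 row-xor alias and record each resulting page for
 * retirement.
 */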
static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
					struct ras_err_data *err_data,
					struct ta_ras_query_address_input *addr_in)
{
	uint32_t col, row, row_xor, bank, channel_index;
	uint64_t soc_pa, retired_page, column, err_addr;
	struct ta_ras_query_address_output addr_out;

	err_addr = addr_in->ma.err_addr;
	addr_in->addr_type = TA_RAS_MCA_TO_PA;
	if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
		dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
			err_addr);

		return;
	}

	soc_pa = addr_out.pa.pa;
	bank = addr_out.pa.bank;
	channel_index = addr_out.pa.channel_idx;

	col = (err_addr >> 1) & 0x1fULL;
	row = (err_addr >> 10) & 0x3fffULL;
	row_xor = row ^ (0x1ULL << 13);
	/* clear [C3 C2] in soc physical address */
	soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
	/* clear [C4] in soc physical address */
	soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);

	/* loop for all possibilities of [C4 C3 C2] */
	for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
		retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
		retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
		/* include column bit 0 and 1 */
		col &= 0x3;
		col |= (column << 2);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row, col, bank, channel_index);
		amdgpu_umc_fill_error_record(err_data, err_addr,
			retired_page, channel_index, addr_in->ma.umc_inst);

		/* shift R13 bit */
		retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row_xor, col, bank, channel_index);
		amdgpu_umc_fill_error_record(err_data, err_addr,
			retired_page, channel_index, addr_in->ma.umc_inst);
	}
}

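/*
 * Same address expansion as umc_v12_0_convert_error_address(), but instead
 * of filling RAS error records it collects the retired page frame numbers
 * into the caller-supplied pfns array (at most len entries) and returns the
 * number of entries written, or 0 on failure or overflow.
 */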
static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
				struct ta_ras_query_address_input *addr_in,
				uint64_t *pfns, int len)
{
	uint32_t col, row, row_xor, bank, channel_index;
	uint64_t soc_pa, retired_page, column, err_addr;
	struct ta_ras_query_address_output addr_out;
	uint32_t pos = 0;

	err_addr = addr_in->ma.err_addr;
	addr_in->addr_type = TA_RAS_MCA_TO_PA;
	if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
		dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
			err_addr);
		return 0;
	}

	soc_pa = addr_out.pa.pa;
	bank = addr_out.pa.bank;
	channel_index = addr_out.pa.channel_idx;

	col = (err_addr >> 1) & 0x1fULL;
	row = (err_addr >> 10) & 0x3fffULL;
	row_xor = row ^ (0x1ULL << 13);
	/* clear [C3 C2] in soc physical address */
	soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
	/* clear [C4] in soc physical address */
	soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);

	/* loop for all possibilities of [C4 C3 C2] */
	for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
		retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
		retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);

		if (pos >= len)
			return 0;
		pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;

		/* include column bit 0 and 1 */
		col &= 0x3;
		col |= (column << 2);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row, col, bank, channel_index);

		/* shift R13 bit */
		retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);

		if (pos >= len)
			return 0;
		pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;

		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row_xor, col, bank, channel_index);
	}

	return pos;
}

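/*
 * Per-channel address query: read MCA_UMC_UMC0_MCUMC_STATUST0 and, for
 * uncorrectable or deferred errors, extract the error address from
 * MCA_UMC_UMC0_MCUMC_ADDRT0 and convert it into retired pages. The status
 * register is cleared before returning.
 */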
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	struct ta_ras_query_address_input addr_in;
	uint64_t mc_umc_status_addr;
	uint64_t mc_umc_status, err_addr;
	uint64_t mc_umc_addrt0;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (mc_umc_status == 0)
		return 0;

	if (!err_data->err_addr) {
		/* clear umc status */
		WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

		return 0;
	}

	/* calculate error address if ue/de error is detected */
	if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) ||
	    umc_v12_0_is_deferred_error(adev, mc_umc_status)) {
		mc_umc_addrt0 =
			SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);

		err_addr = RREG64_PCIE_EXT((mc_umc_addrt0 + umc_reg_offset) * 4);

		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

		if (!adev->aid_mask &&
		    adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id)
			addr_in.ma.socket_id = adev->smuio.funcs->get_socket_id(adev);
		else
			addr_in.ma.socket_id = 0;

		addr_in.ma.err_addr = err_addr;
		addr_in.ma.ch_inst = ch_inst;
		addr_in.ma.umc_inst = umc_inst;
		addr_in.ma.node_inst = node_inst;

		umc_v12_0_convert_error_address(adev, err_data, &addr_in);
	}

	/* clear umc status */
	WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

	return 0;
}

static void umc_v12_0_query_ras_error_address(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_address, ras_error_status);
}

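/*
 * Per-channel counter init: route correctable-error reporting to an
 * APIC-based interrupt via UMCCH0_OdEccCntSel and reset the on-die ECC
 * error counter to its initial value.
 */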
static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint32_t odecc_cnt_sel;
	uint64_t odecc_cnt_sel_addr, odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_cnt_sel_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccCntSel);
	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	odecc_cnt_sel = RREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4);

	/* set ce error interrupt type to APIC based interrupt */
	odecc_cnt_sel = REG_SET_FIELD(odecc_cnt_sel, UMCCH0_OdEccCntSel,
					OdEccErrInt, 0x1);
	WREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4, odecc_cnt_sel);

	/* set error count to initial value */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V12_0_CE_CNT_INIT);

	return 0;
}

static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev,
			enum amdgpu_mca_error_type type, void *ras_error_status)
{
	uint64_t mc_umc_status = *(uint64_t *)ras_error_status;

	switch (type) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
		return umc_v12_0_is_uncorrectable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_CE:
		return umc_v12_0_is_correctable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_DE:
		return umc_v12_0_is_deferred_error(adev, mc_umc_status);
	default:
		return false;
	}
}

static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_err_cnt_init_per_channel, NULL);
}

static bool umc_v12_0_query_ras_poison_mode(struct amdgpu_device *adev)
{
	/*
	 * Force return true, because regUMCCH0_EccCtrl
	 * is not accessible from the host side
	 */
	return true;
}

const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = {
	.query_ras_error_count = umc_v12_0_query_ras_error_count,
	.query_ras_error_address = umc_v12_0_query_ras_error_address,
};

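/*
 * ACA bank parser: classify the bank's STATUS value as a deferred,
 * uncorrectable or correctable error, forward the raw STATUS/IPID/ADDR
 * registers to amdgpu_umc_update_ecc_status(), and log the event into the
 * ACA error cache (using the MISC0 error count when ErrorCodeExt is zero,
 * otherwise counting a single error).
 */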
static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				     enum aca_smu_type type, void *data)
{
	struct amdgpu_device *adev = handle->adev;
	struct aca_bank_info info;
	enum aca_error_type err_type;
	u64 status, count;
	u32 ext_error_code;
	int ret;

	status = bank->regs[ACA_REG_IDX_STATUS];
	if (umc_v12_0_is_deferred_error(adev, status))
		err_type = ACA_ERROR_TYPE_DEFERRED;
	else if (umc_v12_0_is_uncorrectable_error(adev, status))
		err_type = ACA_ERROR_TYPE_UE;
	else if (umc_v12_0_is_correctable_error(adev, status))
		err_type = ACA_ERROR_TYPE_CE;
	else
		return 0;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	amdgpu_umc_update_ecc_status(adev,
		bank->regs[ACA_REG_IDX_STATUS],
		bank->regs[ACA_REG_IDX_IPID],
		bank->regs[ACA_REG_IDX_ADDR]);

	ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
	count = ext_error_code == 0 ?
		ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;

	return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}

static const struct aca_bank_ops umc_v12_0_aca_bank_ops = {
	.aca_bank_parser = umc_v12_0_aca_bank_parser,
};

const struct aca_info umc_v12_0_aca_info = {
	.hwip = ACA_HWIP_TYPE_UMC,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK | ACA_ERROR_DEFERRED_MASK,
	.bank_ops = &umc_v12_0_aca_bank_ops,
};

static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int ret;

	ret = amdgpu_umc_ras_late_init(adev, ras_block);
	if (ret)
		return ret;

	ret = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__UMC,
				  &umc_v12_0_aca_info, NULL);
	if (ret)
		return ret;

	return 0;
}

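/*
 * Record a deferred (poison) error reported through an ACA bank: validate
 * the IPID against the UMC hardware/MCA type, convert the MCA error address
 * into retired page frame numbers, hash them, and log the entry in the
 * de_page_tree so the page retirement worker can pick it up later.
 */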
static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
			uint64_t status, uint64_t ipid, uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	uint16_t hwid, mcatype;
	struct ta_ras_query_address_input addr_in;
	uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
	uint64_t err_addr, hash_val = 0;
	struct ras_ecc_err *ecc_err;
	int count;
	int ret;

	hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
	mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);

	if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
		return 0;

	if (!status)
		return 0;

	if (!umc_v12_0_is_deferred_error(adev, status))
		return 0;

	err_addr = REG_GET_FIELD(addr,
				MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

	dev_dbg(adev->dev,
		"UMC:IPID:0x%llx, socket:%llu, aid:%llu, inst:%llu, ch:%llu, err_addr:0x%llx\n",
		ipid,
		MCA_IPID_2_SOCKET_ID(ipid),
		MCA_IPID_2_DIE_ID(ipid),
		MCA_IPID_2_UMC_INST(ipid),
		MCA_IPID_2_UMC_CH(ipid),
		err_addr);

	memset(page_pfn, 0, sizeof(page_pfn));

	memset(&addr_in, 0, sizeof(addr_in));
	addr_in.ma.err_addr = err_addr;
	addr_in.ma.ch_inst = MCA_IPID_2_UMC_CH(ipid);
	addr_in.ma.umc_inst = MCA_IPID_2_UMC_INST(ipid);
	addr_in.ma.node_inst = MCA_IPID_2_DIE_ID(ipid);
	addr_in.ma.socket_id = MCA_IPID_2_SOCKET_ID(ipid);

	count = umc_v12_0_convert_err_addr(adev,
				&addr_in, page_pfn, ARRAY_SIZE(page_pfn));
	if (count <= 0) {
		dev_warn(adev->dev, "Failed to convert error address! count:%d\n", count);
		return 0;
	}

	ret = amdgpu_umc_build_pages_hash(adev,
			page_pfn, count, &hash_val);
	if (ret) {
		dev_err(adev->dev, "Failed to build error pages hash\n");
		return ret;
	}

	ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL);
	if (!ecc_err)
		return -ENOMEM;

	ecc_err->err_pages.pfn = kcalloc(count, sizeof(*ecc_err->err_pages.pfn), GFP_KERNEL);
	if (!ecc_err->err_pages.pfn) {
		kfree(ecc_err);
		return -ENOMEM;
	}

	memcpy(ecc_err->err_pages.pfn, page_pfn, count * sizeof(*ecc_err->err_pages.pfn));
	ecc_err->err_pages.count = count;

	ecc_err->hash_index = hash_val;
	ecc_err->status = status;
	ecc_err->ipid = ipid;
	ecc_err->addr = addr;

	ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
	if (ret) {
		if (ret == -EEXIST)
			con->umc_ecc_log.de_queried_count++;
		else
			dev_err(adev->dev, "Failed to log ecc error! ret:%d\n", ret);

		kfree(ecc_err->err_pages.pfn);
		kfree(ecc_err);
		return ret;
	}

	con->umc_ecc_log.de_queried_count++;

	/* The problem case is as follows:
	 * 1. GPU A triggers a GPU RAS reset and drives GPU B to perform a
	 *    GPU RAS reset as well.
	 * 2. After GPU B's RAS reset has started, GPU B queries DE data.
	 *    Since the DE data is queried in the RAS reset thread instead of
	 *    the page retirement thread, the bad page retirement work is not
	 *    triggered. Even after all GPU resets complete, the bad pages
	 *    stay cached in RAM until GPU B's bad page retirement work runs
	 *    again and saves them to eeprom.
	 * Trigger delayed work here to save the bad pages to eeprom in time
	 * after the GPU RAS reset is completed.
	 */
	if (amdgpu_ras_in_recovery(adev))
		schedule_delayed_work(&con->page_retirement_dwork,
			msecs_to_jiffies(DELAYED_TIME_FOR_GPU_RESET));

	return 0;
}

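/*
 * Turn a logged deferred-error entry into RAS error records, one per
 * retired page frame number, and bump the deferred-error count.
 */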
static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
				struct ras_ecc_err *ecc_err, void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint32_t i = 0;
	int ret = 0;

	if (!err_data || !ecc_err)
		return -EINVAL;

	for (i = 0; i < ecc_err->err_pages.count; i++) {
		ret = amdgpu_umc_fill_error_record(err_data,
				ecc_err->addr,
				ecc_err->err_pages.pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
				MCA_IPID_2_UMC_CH(ecc_err->ipid),
				MCA_IPID_2_UMC_INST(ecc_err->ipid));
		if (ret)
			break;
	}

	err_data->de_count++;

	return ret;
}

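/*
 * Walk the de_page_tree for entries tagged UMC_ECC_NEW_DETECTED_TAG, fill
 * RAS error records for each of them and clear the tag, all under the ECC
 * log lock.
 */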
static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
					void *ras_error_status)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_ecc_err *entries[MAX_ECC_NUM_PER_RETIREMENT];
	struct radix_tree_root *ecc_tree;
	int new_detected, ret, i;

	ecc_tree = &con->umc_ecc_log.de_page_tree;

	mutex_lock(&con->umc_ecc_log.lock);
	new_detected = radix_tree_gang_lookup_tag(ecc_tree, (void **)entries,
			0, ARRAY_SIZE(entries), UMC_ECC_NEW_DETECTED_TAG);
	for (i = 0; i < new_detected; i++) {
		if (!entries[i])
			continue;

		ret = umc_v12_0_fill_error_record(adev, entries[i], ras_error_status);
		if (ret) {
			dev_err(adev->dev, "Failed to fill umc error record, ret:%d\n", ret);
			break;
		}
		radix_tree_tag_clear(ecc_tree, entries[i]->hash_index, UMC_ECC_NEW_DETECTED_TAG);
	}
	mutex_unlock(&con->umc_ecc_log.lock);
}

struct amdgpu_umc_ras umc_v12_0_ras = {
	.ras_block = {
		.hw_ops = &umc_v12_0_ras_hw_ops,
		.ras_late_init = umc_v12_0_ras_late_init,
	},
	.err_cnt_init = umc_v12_0_err_cnt_init,
	.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
	.ecc_info_query_ras_error_address = umc_v12_0_query_ras_ecc_err_addr,
	.check_ecc_err_status = umc_v12_0_check_ecc_err_status,
	.update_ecc_status = umc_v12_0_update_ecc_status,
};