/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "umc_v12_0.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "umc/umc_12_0_0_offset.h"
#include "umc/umc_12_0_0_sh_mask.h"
#include "mp/mp_13_0_6_sh_mask.h"

#define MAX_ECC_NUM_PER_RETIREMENT  32
#define DELAYED_TIME_FOR_GPU_RESET  1000  /* ms */

static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev,
					    uint32_t node_inst,
					    uint32_t umc_inst,
					    uint32_t ch_inst)
{
	uint32_t index = umc_inst * adev->umc.channel_inst_num + ch_inst;
	uint64_t cross_node_offset = (node_inst == 0) ? 0 : UMC_V12_0_CROSS_NODE_OFFSET;

	umc_inst = index / 4;
	ch_inst = index % 4;

	return adev->umc.channel_offs * ch_inst + UMC_V12_0_INST_DIST * umc_inst +
		UMC_V12_0_NODE_DIST * node_inst + cross_node_offset;
}
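
/*
 * Worked example (the instance counts here are illustrative only): with
 * channel_inst_num == 8, node_inst == 1, umc_inst == 2 and ch_inst == 3,
 * the flattened index is 2 * 8 + 3 = 19, which re-splits into
 * umc_inst = 19 / 4 = 4 and ch_inst = 19 % 4 = 3.  The register offset is
 * then channel_offs * 3 + UMC_V12_0_INST_DIST * 4 + UMC_V12_0_NODE_DIST * 1
 * + UMC_V12_0_CROSS_NODE_OFFSET (the last term because node_inst != 0).
 */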

static int umc_v12_0_reset_error_count_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint64_t odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	/* clear error count */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V12_0_CE_CNT_INIT);

	return 0;
}

static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_reset_error_count_per_channel, NULL);
}

bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	dev_dbg(adev->dev,
		"MCA_UMC_STATUS(0x%llx): Val:%llu, Poison:%llu, Deferred:%llu, PCC:%llu, UC:%llu, TCC:%llu\n",
		mc_umc_status,
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC)
	);

	return (amdgpu_ras_is_poison_mode_supported(adev) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1) ||
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison) == 1)));
}

bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
}

bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0) ||
		/* Identify data parity error in replay mode */
		((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) &&
		!(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
}
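
/*
 * Classification order used by the helpers above: a valid (Val == 1)
 * status is treated as deferred when the Deferred or Poison bit is set
 * (and poison mode is supported); both of the other helpers bail out
 * early on deferred errors, so a deferred status is never double-counted
 * as UE or CE.  UE requires PCC, UC or TCC; CE covers CECC, UECC with UC
 * clear, and the replay-mode parity codes (ErrorCodeExt 0x5/0xb) that are
 * not otherwise uncorrectable.
 */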

static void umc_v12_0_query_error_count_per_type(struct amdgpu_device *adev,
						   uint64_t umc_reg_offset,
						   unsigned long *error_count,
						   check_error_type_func error_type_func)
{
	uint64_t mc_umc_status;
	uint64_t mc_umc_status_addr;

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	/* Check MCUMC_STATUS */
	mc_umc_status =
		RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (error_type_func(adev, mc_umc_status))
		*error_count += 1;
}

static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	unsigned long ue_count = 0, ce_count = 0, de_count = 0;

	/* NOTE: node_inst has already been remapped through adev->umc.active_mask
	 * into the range [0-3], so it can be used directly as the die ID.
	 */
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = node_inst,
	};

	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					    &ce_count, umc_v12_0_is_correctable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					    &ue_count, umc_v12_0_is_uncorrectable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					    &de_count, umc_v12_0_is_deferred_error);

	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
	amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, de_count);

	return 0;
}

static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
					   void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_count, ras_error_status);

	umc_v12_0_reset_error_count(adev);
}
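
/*
 * Note that the query path above samples MCA_UMC_UMC0_MCUMC_STATUST0 per
 * channel, while the reset path re-arms the hardware OdEccErrCnt counters
 * to UMC_V12_0_CE_CNT_INIT; the status registers themselves are cleared
 * separately in the error-address query further below.
 */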

static void umc_v12_0_get_retire_flip_bits(struct amdgpu_device *adev)
{
	enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
	uint32_t vram_type = adev->gmc.vram_type;
	struct amdgpu_umc_flip_bits *flip_bits = &(adev->umc.flip_bits);

	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);

	if (adev->gmc.num_umc == 16) {
		/* default setting */
		flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT;
		flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT;
		flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT;
		flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT;
		flip_bits->flip_row_bit = 13;
		flip_bits->bit_num = 4;
		flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT;

		if (nps == AMDGPU_NPS2_PARTITION_MODE) {
			flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT;
			flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT;
			flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT;
			flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT;
		} else if (nps == AMDGPU_NPS4_PARTITION_MODE) {
			flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT;
			flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT;
			flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT;
			flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT;
		}

		switch (vram_type) {
		case AMDGPU_VRAM_TYPE_HBM:
			/* other nps modes are taken as nps1 */
			if (nps == AMDGPU_NPS2_PARTITION_MODE)
				flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
			else if (nps == AMDGPU_NPS4_PARTITION_MODE)
				flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;

			break;
		case AMDGPU_VRAM_TYPE_HBM3E:
			flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
			flip_bits->flip_row_bit = 12;

			if (nps == AMDGPU_NPS2_PARTITION_MODE)
				flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
			else if (nps == AMDGPU_NPS4_PARTITION_MODE)
				flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT;

			break;
		default:
			dev_warn(adev->dev,
				"Unknown HBM type, keeping the NPS1-mode RAS retire flip bits.\n");
			break;
		}
	} else if (adev->gmc.num_umc == 8) {
		/* default setting */
		flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT;
		flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT;
		flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT;
		flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
		flip_bits->flip_row_bit = 12;
		flip_bits->bit_num = 4;
		flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT;

		if (nps == AMDGPU_NPS2_PARTITION_MODE) {
			flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT;
			flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT;
			flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT;
			flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT;
		}

		switch (vram_type) {
		case AMDGPU_VRAM_TYPE_HBM:
			flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;

			/* other nps modes are taken as nps1 */
			if (nps == AMDGPU_NPS2_PARTITION_MODE)
				flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;

			break;
		case AMDGPU_VRAM_TYPE_HBM3E:
			flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
			flip_bits->flip_row_bit = 12;

			if (nps == AMDGPU_NPS2_PARTITION_MODE)
				flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT;

			break;
		default:
			dev_warn(adev->dev,
				"Unknown HBM type, keeping the NPS1-mode RAS retire flip bits.\n");
			break;
		}
	} else {
		dev_warn(adev->dev,
			"Unsupported UMC count (%d), failed to set RAS flip bits.\n",
			adev->gmc.num_umc);

		return;
	}

	adev->umc.retire_unit = 0x1 << flip_bits->bit_num;
}
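
/*
 * With bit_num == 4 the retire unit is 1 << 4 == 16: one bad cell retires
 * 16 physical pages, one for each combination of the four flip bits (for
 * the 16-UMC NPS1 default these are C2/C3/C4 plus R13; the other modes
 * substitute the channel/bank bits set up above).  Only the bit positions
 * shift with partition mode and HBM generation; the unit size itself
 * stays 1 << bit_num.
 */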

static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
					struct ras_err_data *err_data,
					struct ta_ras_query_address_input *addr_in,
					struct ta_ras_query_address_output *addr_out,
					bool dump_addr)
{
	/* row stays 0 unless all flip bits (including the row bit) are in use */
	uint32_t col, col_lower, row = 0, row_lower, row_high, bank;
	uint32_t channel_index = 0, umc_inst = 0;
	uint32_t i, bit_num, retire_unit, *flip_bits;
	uint64_t soc_pa, column, err_addr;
	struct ta_ras_query_address_output addr_out_tmp;
	struct ta_ras_query_address_output *paddr_out;
	int ret = 0;

	if (!addr_out)
		paddr_out = &addr_out_tmp;
	else
		paddr_out = addr_out;

	err_addr = bank = 0;
	if (addr_in) {
		err_addr = addr_in->ma.err_addr;
		addr_in->addr_type = TA_RAS_MCA_TO_PA;
		ret = psp_ras_query_address(&adev->psp, addr_in, paddr_out);
		if (ret) {
			dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx\n",
				err_addr);

			goto out;
		}

		bank = paddr_out->pa.bank;
		/* no need to care about umc inst if addr_in is NULL */
		umc_inst = addr_in->ma.umc_inst;
	}

	flip_bits = adev->umc.flip_bits.flip_bits_in_pa;
	bit_num = adev->umc.flip_bits.bit_num;
	retire_unit = adev->umc.retire_unit;

	soc_pa = paddr_out->pa.pa;
	channel_index = paddr_out->pa.channel_idx;
	/* clear loop bits in soc physical address */
	for (i = 0; i < bit_num; i++)
		soc_pa &= ~BIT_ULL(flip_bits[i]);

	paddr_out->pa.pa = soc_pa;
	/* get column bit 0 and 1 in mca address */
	col_lower = (err_addr >> 1) & 0x3ULL;
	/* extra row bit will be handled later */
	row_lower = (err_addr >> UMC_V12_0_MA_R0_BIT) & 0x1fffULL;
	row_lower &= ~BIT_ULL(adev->umc.flip_bits.flip_row_bit);

	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 5, 0)) {
		row_high = (soc_pa >> adev->umc.flip_bits.r13_in_pa) & 0x3ULL;
		/* Each channel spans 2.25GB, so row bits [R14 R13] are
		 * remapped in the MCA-to-PA conversion when their value
		 * is 0x3; read them from the PA instead of the MCA address.
		 */
		row_lower |= (row_high << 13);
	}

	if (!err_data && !dump_addr)
		goto out;

	/* loop for all possibilities of retired bits */
	for (column = 0; column < retire_unit; column++) {
		soc_pa = paddr_out->pa.pa;
		for (i = 0; i < bit_num; i++)
			soc_pa |= (((column >> i) & 0x1ULL) << flip_bits[i]);

		col = ((column & 0x7) << 2) | col_lower;
		/* handle extra row bit */
		if (bit_num == RETIRE_FLIP_BITS_NUM)
			row = ((column >> 3) << adev->umc.flip_bits.flip_row_bit) |
					row_lower;

		if (dump_addr)
			dev_info(adev->dev,
				"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
				soc_pa, row, col, bank, channel_index);

		if (err_data)
			amdgpu_umc_fill_error_record(err_data, err_addr,
				soc_pa, channel_index, umc_inst);
	}

out:
	return ret;
}
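
/*
 * A concrete walk through the retirement loop above, with bit_num == 4
 * and retire_unit == 16: at column == 0b0101, bits 0 and 2 of column are
 * set, so flip_bits[0] and flip_bits[2] are OR'ed back into the cleared
 * base PA; the reported column becomes ((0b101) << 2) | col_lower, and
 * bit 3 of column (here 0) supplies the extra row bit at flip_row_bit.
 */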

static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	struct ta_ras_query_address_input addr_in;
	uint64_t mc_umc_status_addr;
	uint64_t mc_umc_status, err_addr;
	uint64_t mc_umc_addrt0;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (mc_umc_status == 0)
		return 0;

	if (!err_data->err_addr) {
		/* clear umc status */
		WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

		return 0;
	}

	/* calculate error address if a ue or deferred error is detected */
	if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) ||
	    umc_v12_0_is_deferred_error(adev, mc_umc_status)) {
		mc_umc_addrt0 =
			SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);

		err_addr = RREG64_PCIE_EXT((mc_umc_addrt0 + umc_reg_offset) * 4);

		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

		if (!adev->aid_mask &&
		    adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id)
			addr_in.ma.socket_id = adev->smuio.funcs->get_socket_id(adev);
		else
			addr_in.ma.socket_id = 0;

		addr_in.ma.err_addr = err_addr;
		addr_in.ma.ch_inst = ch_inst;
		addr_in.ma.umc_inst = umc_inst;
		addr_in.ma.node_inst = node_inst;

		umc_v12_0_convert_error_address(adev, err_data, &addr_in, NULL, true);
	}

	/* clear umc status */
	WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

	return 0;
}

static void umc_v12_0_query_ras_error_address(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_address, ras_error_status);
}

static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint32_t odecc_cnt_sel;
	uint64_t odecc_cnt_sel_addr, odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_cnt_sel_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccCntSel);
	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	odecc_cnt_sel = RREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4);

	/* set ce error interrupt type to APIC based interrupt */
	odecc_cnt_sel = REG_SET_FIELD(odecc_cnt_sel, UMCCH0_OdEccCntSel,
					OdEccErrInt, 0x1);
	WREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4, odecc_cnt_sel);

	/* set error count to initial value */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V12_0_CE_CNT_INIT);

	return 0;
}
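
/*
 * The init above is a plain read-modify-write: REG_SET_FIELD() only
 * rewrites the OdEccErrInt bits of the cached OdEccCntSel value (the
 * mask and shift come from the sh_mask header), so the other counter
 * controls in the register are preserved when the word is written back
 * through the indirect PCIe accessor.
 */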

static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev,
			enum amdgpu_mca_error_type type, void *ras_error_status)
{
	uint64_t mc_umc_status = *(uint64_t *)ras_error_status;

	switch (type) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
		return umc_v12_0_is_uncorrectable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_CE:
		return umc_v12_0_is_correctable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_DE:
		return umc_v12_0_is_deferred_error(adev, mc_umc_status);
	default:
		return false;
	}
}

static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_err_cnt_init_per_channel, NULL);
}

static bool umc_v12_0_query_ras_poison_mode(struct amdgpu_device *adev)
{
	/*
	 * Force return true, because regUMCCH0_EccCtrl
	 * is not accessible from the host side
	 */
	return true;
}

const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = {
	.query_ras_error_count = umc_v12_0_query_ras_error_count,
	.query_ras_error_address = umc_v12_0_query_ras_error_address,
};

static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				     enum aca_smu_type type, void *data)
{
	struct amdgpu_device *adev = handle->adev;
	struct aca_bank_info info;
	enum aca_error_type err_type;
	u64 status, count;
	u32 ext_error_code;
	int ret;

	status = bank->regs[ACA_REG_IDX_STATUS];
	if (umc_v12_0_is_deferred_error(adev, status))
		err_type = ACA_ERROR_TYPE_DEFERRED;
	else if (umc_v12_0_is_uncorrectable_error(adev, status))
		err_type = ACA_ERROR_TYPE_UE;
	else if (umc_v12_0_is_correctable_error(adev, status))
		err_type = ACA_ERROR_TYPE_CE;
	else
		return 0;
	bank->aca_err_type = err_type;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	amdgpu_umc_update_ecc_status(adev,
		bank->regs[ACA_REG_IDX_STATUS],
		bank->regs[ACA_REG_IDX_IPID],
		bank->regs[ACA_REG_IDX_ADDR]);

	ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
	if (err_type == ACA_ERROR_TYPE_DEFERRED)
		count = ext_error_code == 0 ?
			adev->umc.err_addr_cnt / adev->umc.retire_unit : 1ULL;
	else
		count = ext_error_code == 0 ?
			ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;

	return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}
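
/*
 * Count derivation for the parser above: a deferred bank with
 * ErrorCodeExt == 0 stands for a batch of retired addresses, so it is
 * logged as err_addr_cnt / retire_unit events (one per retire unit); a
 * UE/CE bank with ErrorCodeExt == 0 uses the hardware MISC0 error
 * counter; any bank with a non-zero extended code counts as one event.
 */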

static const struct aca_bank_ops umc_v12_0_aca_bank_ops = {
	.aca_bank_parser = umc_v12_0_aca_bank_parser,
};

const struct aca_info umc_v12_0_aca_info = {
	.hwip = ACA_HWIP_TYPE_UMC,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK | ACA_ERROR_DEFERRED_MASK,
	.bank_ops = &umc_v12_0_aca_bank_ops,
};

static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int ret;

	ret = amdgpu_umc_ras_late_init(adev, ras_block);
	if (ret)
		return ret;

	ret = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__UMC,
				  &umc_v12_0_aca_info, NULL);
	if (ret)
		return ret;

	return 0;
}

static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
			uint64_t status, uint64_t ipid, uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	uint16_t hwid, mcatype;
	uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
	uint64_t err_addr, pa_addr = 0;
	struct ras_ecc_err *ecc_err;
	struct ta_ras_query_address_output addr_out;
	uint32_t shift_bit = adev->umc.flip_bits.flip_bits_in_pa[2];
	int count, ret, i;

	hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
	mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);

	/* For poison consumption events the decoded IP block is the SMU
	 * rather than the UMC; just count them here.
	 */
	if (hwid != MCA_UMC_HWID_V12_0 || mcatype != MCA_UMC_MCATYPE_V12_0) {
		con->umc_ecc_log.consumption_q_count++;
		return 0;
	}

	if (!status)
		return 0;

	if (!umc_v12_0_is_deferred_error(adev, status))
		return 0;

	err_addr = REG_GET_FIELD(addr,
				MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

	dev_dbg(adev->dev,
		"UMC:IPID:0x%llx, socket:%llu, aid:%llu, inst:%llu, ch:%llu, err_addr:0x%llx\n",
		ipid,
		MCA_IPID_2_SOCKET_ID(ipid),
		MCA_IPID_2_DIE_ID(ipid),
		MCA_IPID_2_UMC_INST(ipid),
		MCA_IPID_2_UMC_CH(ipid),
		err_addr);

	ret = amdgpu_umc_mca_to_addr(adev,
			err_addr, MCA_IPID_2_UMC_CH(ipid),
			MCA_IPID_2_UMC_INST(ipid), MCA_IPID_2_DIE_ID(ipid),
			MCA_IPID_2_SOCKET_ID(ipid), &addr_out, true);
	if (ret)
		return ret;

	ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL);
	if (!ecc_err)
		return -ENOMEM;

	pa_addr = addr_out.pa.pa;
	ecc_err->status = status;
	ecc_err->ipid = ipid;
	ecc_err->addr = addr;
	ecc_err->pa_pfn = pa_addr >> AMDGPU_GPU_PAGE_SHIFT;
	ecc_err->channel_idx = addr_out.pa.channel_idx;

	/* If converted pa_pfn is 0, use pa C4 pfn. */
	if (!ecc_err->pa_pfn)
		ecc_err->pa_pfn = BIT_ULL(shift_bit) >> AMDGPU_GPU_PAGE_SHIFT;

	ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
	if (ret) {
		if (ret == -EEXIST)
			con->umc_ecc_log.de_queried_count++;
		else
			dev_err(adev->dev, "Failed to log ECC error! ret:%d\n", ret);

		kfree(ecc_err);
		return ret;
	}

	con->umc_ecc_log.de_queried_count++;

	memset(page_pfn, 0, sizeof(page_pfn));
	count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
				pa_addr,
				page_pfn, ARRAY_SIZE(page_pfn));
	if (count <= 0) {
		dev_warn(adev->dev, "Failed to convert error address! count:%d\n", count);
		return 0;
	}

	/* Reserve memory */
	for (i = 0; i < count; i++)
		amdgpu_ras_reserve_page(adev, page_pfn[i]);

	/* The problem case is as follows:
	 * 1. GPU A triggers a GPU RAS reset and drives GPU B to
	 *    perform a GPU RAS reset as well.
	 * 2. After GPU B's RAS reset has started, GPU B queries DE
	 *    data. Since the data is queried in the RAS reset thread
	 *    rather than in the page retirement thread, the bad page
	 *    retirement work is not triggered. Even after all GPU
	 *    resets complete, the bad pages stay cached in RAM until
	 *    GPU B's retirement work runs again and saves them to
	 *    eeprom.
	 * Trigger the delayed work so the bad pages are saved to eeprom
	 * promptly once the GPU RAS reset has completed.
	 */
	if (amdgpu_ras_in_recovery(adev))
		schedule_delayed_work(&con->page_retirement_dwork,
			msecs_to_jiffies(DELAYED_TIME_FOR_GPU_RESET));

	return 0;
}

static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
				struct ras_ecc_err *ecc_err, void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
	int ret = 0, i, count;

	if (!err_data || !ecc_err)
		return -EINVAL;

	memset(page_pfn, 0, sizeof(page_pfn));
	count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
				ecc_err->pa_pfn << AMDGPU_GPU_PAGE_SHIFT,
				page_pfn, ARRAY_SIZE(page_pfn));

	for (i = 0; i < count; i++) {
		ret = amdgpu_umc_fill_error_record(err_data,
				ecc_err->addr,
				page_pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
				ecc_err->channel_idx,
				MCA_IPID_2_UMC_INST(ecc_err->ipid));
		if (ret)
			break;
	}

	err_data->de_count++;

	return ret;
}

static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
					void *ras_error_status)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_ecc_err *entries[MAX_ECC_NUM_PER_RETIREMENT];
	struct radix_tree_root *ecc_tree;
	int new_detected, ret, i;

	ecc_tree = &con->umc_ecc_log.de_page_tree;

	mutex_lock(&con->umc_ecc_log.lock);
	new_detected = radix_tree_gang_lookup_tag(ecc_tree, (void **)entries,
			0, ARRAY_SIZE(entries), UMC_ECC_NEW_DETECTED_TAG);
	for (i = 0; i < new_detected; i++) {
		if (!entries[i])
			continue;

		ret = umc_v12_0_fill_error_record(adev, entries[i], ras_error_status);
		if (ret) {
			dev_err(adev->dev, "Failed to fill UMC error record, ret:%d\n", ret);
			break;
		}
		radix_tree_tag_clear(ecc_tree,
				entries[i]->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
	}
	mutex_unlock(&con->umc_ecc_log.lock);
}
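
/*
 * The lookup above uses the radix tree's gang-lookup-by-tag API: up to
 * MAX_ECC_NUM_PER_RETIREMENT entries still tagged UMC_ECC_NEW_DETECTED_TAG
 * are pulled per pass, and the tag is cleared (keyed by pa_pfn) once an
 * entry has been folded into the error data, so each record is converted
 * exactly once while remaining cached in de_page_tree.
 */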

static uint32_t umc_v12_0_get_die_id(struct amdgpu_device *adev,
		uint64_t mca_addr, uint64_t retired_page)
{
	uint32_t die = 0;

	/* we only calculate die id for nps1 mode right now */
	die += ((((retired_page >> 12) & 0x1ULL) ^
	    ((retired_page >> 20) & 0x1ULL) ^
	    ((retired_page >> 27) & 0x1ULL) ^
	    ((retired_page >> 34) & 0x1ULL) ^
	    ((retired_page >> 41) & 0x1ULL)) << 0);

	/* the original PA_C4 and PA_R13 may be cleared in retired_page, so
	 * get them from mca_addr.
	 */
	die += ((((retired_page >> 13) & 0x1ULL) ^
	    ((mca_addr >> 5) & 0x1ULL) ^
	    ((retired_page >> 28) & 0x1ULL) ^
	    ((mca_addr >> 23) & 0x1ULL) ^
	    ((retired_page >> 42) & 0x1ULL)) << 1);
	die &= 3;

	return die;
}
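
/*
 * Die-ID recovery, spelled out: bit 0 of the die is the XOR of physical
 * address bits 12, 20, 27, 34 and 41; bit 1 XORs PA bits 13, 28 and 42
 * with MCA address bits 5 and 23, since the PA copies of C4 and R13 may
 * already have been cleared in the retired page.  For example, a
 * retired_page with only bit 20 set among those taps yields die 1.
 */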

static void umc_v12_0_mca_ipid_parse(struct amdgpu_device *adev, uint64_t ipid,
		uint32_t *did, uint32_t *ch, uint32_t *umc_inst, uint32_t *sid)
{
	if (did)
		*did = MCA_IPID_2_DIE_ID(ipid);
	if (ch)
		*ch = MCA_IPID_2_UMC_CH(ipid);
	if (umc_inst)
		*umc_inst = MCA_IPID_2_UMC_INST(ipid);
	if (sid)
		*sid = MCA_IPID_2_SOCKET_ID(ipid);
}

struct amdgpu_umc_ras umc_v12_0_ras = {
	.ras_block = {
		.hw_ops = &umc_v12_0_ras_hw_ops,
		.ras_late_init = umc_v12_0_ras_late_init,
	},
	.err_cnt_init = umc_v12_0_err_cnt_init,
	.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
	.ecc_info_query_ras_error_address = umc_v12_0_query_ras_ecc_err_addr,
	.check_ecc_err_status = umc_v12_0_check_ecc_err_status,
	.update_ecc_status = umc_v12_0_update_ecc_status,
	.convert_ras_err_addr = umc_v12_0_convert_error_address,
	.get_die_id_from_pa = umc_v12_0_get_die_id,
	.get_retire_flip_bits = umc_v12_0_get_retire_flip_bits,
	.mca_ipid_parse = umc_v12_0_mca_ipid_parse,
};