/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "umc_v12_0.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "umc/umc_12_0_0_offset.h"
#include "umc/umc_12_0_0_sh_mask.h"
#include "mp/mp_13_0_6_sh_mask.h"

#define MAX_ECC_NUM_PER_RETIREMENT 32
#define DELAYED_TIME_FOR_GPU_RESET 1000 //ms

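/*
 * Compute the register offset of a UMC channel instance.
 *
 * The (umc_inst, ch_inst) pair provided by the channel iterator is flattened
 * into a linear index and re-split into groups of four channels per UMC
 * instance before the node/instance/channel strides are applied. As a purely
 * hypothetical example, assuming adev->umc.channel_inst_num were 8,
 * umc_inst = 1 and ch_inst = 5 would give index = 13, i.e. the offset of
 * UMC instance 3, channel 1 within the addressed node.
 */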
static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev,
					    uint32_t node_inst,
					    uint32_t umc_inst,
					    uint32_t ch_inst)
{
	uint32_t index = umc_inst * adev->umc.channel_inst_num + ch_inst;
	uint64_t cross_node_offset = (node_inst == 0) ? 0 : UMC_V12_0_CROSS_NODE_OFFSET;

	umc_inst = index / 4;
	ch_inst = index % 4;

	return adev->umc.channel_offs * ch_inst + UMC_V12_0_INST_DIST * umc_inst +
		UMC_V12_0_NODE_DIST * node_inst + cross_node_offset;
}

static int umc_v12_0_reset_error_count_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint64_t odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	/* clear error count */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V12_0_CE_CNT_INIT);

	return 0;
}

static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_reset_error_count_per_channel, NULL);
}

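/*
 * Classify an MCA_UMC_UMC0_MCUMC_STATUST0 value. A valid status with the
 * Deferred bit set (when poison mode is supported) is a deferred error;
 * PCC/UC/TCC mark it uncorrectable; CECC, UECC without UC, or a data parity
 * error in replay mode (ErrorCodeExt 0x5/0xb) that is not uncorrectable is
 * counted as correctable.
 */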
bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	dev_dbg(adev->dev,
		"MCA_UMC_STATUS(0x%llx): Val:%llu, Poison:%llu, Deferred:%llu, PCC:%llu, UC:%llu, TCC:%llu\n",
		mc_umc_status,
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC)
	);

	return (amdgpu_ras_is_poison_mode_supported(adev) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1));
}

bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
}

bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0) ||
		/* Identify data parity error in replay mode */
		((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) &&
		!(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
}

static void umc_v12_0_query_error_count_per_type(struct amdgpu_device *adev,
						uint64_t umc_reg_offset,
						unsigned long *error_count,
						check_error_type_func error_type_func)
{
	uint64_t mc_umc_status;
	uint64_t mc_umc_status_addr;

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	/* Check MCUMC_STATUS */
	mc_umc_status =
		RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (error_type_func(adev, mc_umc_status))
		*error_count += 1;
}

static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	unsigned long ue_count = 0, ce_count = 0, de_count = 0;

	/* NOTE: node_inst is converted by adev->umc.active_mask and the range is [0-3],
	 * which can be used as die ID directly */
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = node_inst,
	};

	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					&ce_count, umc_v12_0_is_correctable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					&ue_count, umc_v12_0_is_uncorrectable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					&de_count, umc_v12_0_is_deferred_error);

	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
	amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, de_count);

	return 0;
}

static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_count, ras_error_status);

	umc_v12_0_reset_error_count(adev);
}

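/*
 * Translate an MCA error address into the set of retireable SoC physical
 * pages. The PSP TA maps the MCA address to one physical address; column
 * bits [C4 C3 C2] and row bit R13 are then swept so that every page aliasing
 * the same failing DRAM cells is logged and recorded for retirement.
 */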
static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
					struct ras_err_data *err_data,
					struct ta_ras_query_address_input *addr_in)
{
	uint32_t col, row, row_xor, bank, channel_index;
	uint64_t soc_pa, retired_page, column, err_addr;
	struct ta_ras_query_address_output addr_out;

	err_addr = addr_in->ma.err_addr;
	addr_in->addr_type = TA_RAS_MCA_TO_PA;
	if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
		dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
			err_addr);

		return;
	}

	soc_pa = addr_out.pa.pa;
	bank = addr_out.pa.bank;
	channel_index = addr_out.pa.channel_idx;

	col = (err_addr >> 1) & 0x1fULL;
	row = (err_addr >> 10) & 0x3fffULL;
	row_xor = row ^ (0x1ULL << 13);
	/* clear [C3 C2] in soc physical address */
	soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
	/* clear [C4] in soc physical address */
	soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);

	/* loop for all possibilities of [C4 C3 C2] */
	for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
		retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
		retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
		/* include column bit 0 and 1 */
		col &= 0x3;
		col |= (column << 2);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row, col, bank, channel_index);
		amdgpu_umc_fill_error_record(err_data, err_addr,
			retired_page, channel_index, addr_in->ma.umc_inst);

		/* shift R13 bit */
		retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row_xor, col, bank, channel_index);
		amdgpu_umc_fill_error_record(err_data, err_addr,
			retired_page, channel_index, addr_in->ma.umc_inst);
	}
}

static void umc_v12_0_dump_addr_info(struct amdgpu_device *adev,
				struct ta_ras_query_address_output *addr_out,
				uint64_t err_addr)
{
	uint32_t col, row, row_xor, bank, channel_index;
	uint64_t soc_pa, retired_page, column;

	soc_pa = addr_out->pa.pa;
	bank = addr_out->pa.bank;
	channel_index = addr_out->pa.channel_idx;

	col = (err_addr >> 1) & 0x1fULL;
	row = (err_addr >> 10) & 0x3fffULL;
	row_xor = row ^ (0x1ULL << 13);
	/* clear [C3 C2] in soc physical address */
	soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
	/* clear [C4] in soc physical address */
	soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);

	/* loop for all possibilities of [C4 C3 C2] */
	for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
		retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
		retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
		/* include column bit 0 and 1 */
		col &= 0x3;
		col |= (column << 2);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row, col, bank, channel_index);

		/* shift R13 bit */
		retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row_xor, col, bank, channel_index);
	}
}

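/*
 * Expand one physical address into the PFNs of all pages sharing the failing
 * row (same [C4 C3 C2] sweep and R13 flip as above), storing up to 'len'
 * entries in 'pfns'. Returns the number of PFNs written, or 0 if the buffer
 * cannot hold them all.
 */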
static int umc_v12_0_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
			uint64_t pa_addr, uint64_t *pfns, int len)
{
	uint64_t soc_pa, retired_page, column;
	uint32_t pos = 0;

	soc_pa = pa_addr;
	/* clear [C3 C2] in soc physical address */
	soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
	/* clear [C4] in soc physical address */
	soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);

	/* loop for all possibilities of [C4 C3 C2] */
	for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
		retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
		retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);

		if (pos >= len)
			return 0;
		pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;

		/* shift R13 bit */
		retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);

		if (pos >= len)
			return 0;
		pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
	}

	return pos;
}

static int umc_v12_0_convert_mca_to_addr(struct amdgpu_device *adev,
			uint64_t err_addr, uint32_t ch, uint32_t umc,
			uint32_t node, uint32_t socket,
			uint64_t *addr, bool dump_addr)
{
	struct ta_ras_query_address_input addr_in;
	struct ta_ras_query_address_output addr_out;

	memset(&addr_in, 0, sizeof(addr_in));
	addr_in.ma.err_addr = err_addr;
	addr_in.ma.ch_inst = ch;
	addr_in.ma.umc_inst = umc;
	addr_in.ma.node_inst = node;
	addr_in.ma.socket_id = socket;
	addr_in.addr_type = TA_RAS_MCA_TO_PA;
	if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out)) {
		dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
			err_addr);
		return -EINVAL;
	}

	if (dump_addr)
		umc_v12_0_dump_addr_info(adev, &addr_out, err_addr);

	*addr = addr_out.pa.pa;

	return 0;
}

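/*
 * Per-channel callback for amdgpu_umc_loop_channels(): read MCUMC_STATUST0,
 * and if an uncorrectable or deferred error is flagged, translate the MCA
 * error address into retireable pages before clearing the status register.
 */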
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	struct ta_ras_query_address_input addr_in;
	uint64_t mc_umc_status_addr;
	uint64_t mc_umc_status, err_addr;
	uint64_t mc_umc_addrt0;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (mc_umc_status == 0)
		return 0;

	if (!err_data->err_addr) {
		/* clear umc status */
		WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

		return 0;
	}

	/* calculate error address if ue error is detected */
	if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) ||
	    umc_v12_0_is_deferred_error(adev, mc_umc_status)) {
		mc_umc_addrt0 =
			SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);

		err_addr = RREG64_PCIE_EXT((mc_umc_addrt0 + umc_reg_offset) * 4);

		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

		if (!adev->aid_mask &&
		    adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id)
			addr_in.ma.socket_id = adev->smuio.funcs->get_socket_id(adev);
		else
			addr_in.ma.socket_id = 0;

		addr_in.ma.err_addr = err_addr;
		addr_in.ma.ch_inst = ch_inst;
		addr_in.ma.umc_inst = umc_inst;
		addr_in.ma.node_inst = node_inst;

		umc_v12_0_convert_error_address(adev, err_data, &addr_in);
	}

	/* clear umc status */
	WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

	return 0;
}

static void umc_v12_0_query_ras_error_address(struct amdgpu_device *adev,
					void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_address, ras_error_status);
}

static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint32_t odecc_cnt_sel;
	uint64_t odecc_cnt_sel_addr, odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_cnt_sel_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccCntSel);
	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	odecc_cnt_sel = RREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4);

	/* set ce error interrupt type to APIC based interrupt */
	odecc_cnt_sel = REG_SET_FIELD(odecc_cnt_sel, UMCCH0_OdEccCntSel,
					OdEccErrInt, 0x1);
	WREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4, odecc_cnt_sel);

	/* set error count to initial value */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V12_0_CE_CNT_INIT);

	return 0;
}

static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev,
			enum amdgpu_mca_error_type type, void *ras_error_status)
{
	uint64_t mc_umc_status = *(uint64_t *)ras_error_status;

	switch (type) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
		return umc_v12_0_is_uncorrectable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_CE:
		return umc_v12_0_is_correctable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_DE:
		return umc_v12_0_is_deferred_error(adev, mc_umc_status);
	default:
		return false;
	}

	return false;
}

static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_err_cnt_init_per_channel, NULL);
}

static bool umc_v12_0_query_ras_poison_mode(struct amdgpu_device *adev)
{
	/*
	 * Force return true, because regUMCCH0_EccCtrl
	 * is not accessible from host side
	 */
	return true;
}

const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = {
	.query_ras_error_count = umc_v12_0_query_ras_error_count,
	.query_ras_error_address = umc_v12_0_query_ras_error_address,
};

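/*
 * ACA bank parser: classify the bank status, update the ECC status cache for
 * deferred errors, and log the error count. Banks with a non-zero extended
 * error code are counted as a single error instead of using MISC0.ErrCnt.
 */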
static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				enum aca_smu_type type, void *data)
{
	struct amdgpu_device *adev = handle->adev;
	struct aca_bank_info info;
	enum aca_error_type err_type;
	u64 status, count;
	u32 ext_error_code;
	int ret;

	status = bank->regs[ACA_REG_IDX_STATUS];
	if (umc_v12_0_is_deferred_error(adev, status))
		err_type = ACA_ERROR_TYPE_DEFERRED;
	else if (umc_v12_0_is_uncorrectable_error(adev, status))
		err_type = ACA_ERROR_TYPE_UE;
	else if (umc_v12_0_is_correctable_error(adev, status))
		err_type = ACA_ERROR_TYPE_CE;
	else
		return 0;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	amdgpu_umc_update_ecc_status(adev,
		bank->regs[ACA_REG_IDX_STATUS],
		bank->regs[ACA_REG_IDX_IPID],
		bank->regs[ACA_REG_IDX_ADDR]);

	ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
	count = ext_error_code == 0 ?
		ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;

	return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}

static const struct aca_bank_ops umc_v12_0_aca_bank_ops = {
	.aca_bank_parser = umc_v12_0_aca_bank_parser,
};

const struct aca_info umc_v12_0_aca_info = {
	.hwip = ACA_HWIP_TYPE_UMC,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK | ACA_ERROR_DEFERRED_MASK,
	.bank_ops = &umc_v12_0_aca_bank_ops,
};

static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int ret;

	ret = amdgpu_umc_ras_late_init(adev, ras_block);
	if (ret)
		return ret;

	ret = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__UMC,
				&umc_v12_0_aca_info, NULL);
	if (ret)
		return ret;

	return 0;
}

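/*
 * Record a deferred error reported through MCA: convert the MCA address to a
 * physical address, log it in the deferred-error radix tree, reserve every
 * aliasing page, and kick the page retirement work if a RAS recovery is in
 * flight.
 */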
static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
			uint64_t status, uint64_t ipid, uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	uint16_t hwid, mcatype;
	uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
	uint64_t err_addr, pa_addr = 0;
	struct ras_ecc_err *ecc_err;
	int count, ret, i;

	hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
	mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);

	if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
		return 0;

	if (!status)
		return 0;

	if (!umc_v12_0_is_deferred_error(adev, status))
		return 0;

	err_addr = REG_GET_FIELD(addr,
			MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

	dev_dbg(adev->dev,
		"UMC:IPID:0x%llx, socket:%llu, aid:%llu, inst:%llu, ch:%llu, err_addr:0x%llx\n",
		ipid,
		MCA_IPID_2_SOCKET_ID(ipid),
		MCA_IPID_2_DIE_ID(ipid),
		MCA_IPID_2_UMC_INST(ipid),
		MCA_IPID_2_UMC_CH(ipid),
		err_addr);

	ret = umc_v12_0_convert_mca_to_addr(adev,
			err_addr, MCA_IPID_2_UMC_CH(ipid),
			MCA_IPID_2_UMC_INST(ipid), MCA_IPID_2_DIE_ID(ipid),
			MCA_IPID_2_SOCKET_ID(ipid), &pa_addr, true);
	if (ret)
		return ret;

	ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL);
	if (!ecc_err)
		return -ENOMEM;

	ecc_err->status = status;
	ecc_err->ipid = ipid;
	ecc_err->addr = addr;
	ecc_err->pa_pfn = UMC_V12_ADDR_MASK_BAD_COLS(pa_addr) >> AMDGPU_GPU_PAGE_SHIFT;

	/* If the converted pa_pfn is 0, use the PFN of the C4 page instead. */
	if (!ecc_err->pa_pfn)
		ecc_err->pa_pfn = BIT_ULL(UMC_V12_0_PA_C4_BIT) >> AMDGPU_GPU_PAGE_SHIFT;

	ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
	if (ret) {
		if (ret == -EEXIST)
			con->umc_ecc_log.de_queried_count++;
		else
			dev_err(adev->dev, "Failed to log ecc error! ret:%d\n", ret);

		kfree(ecc_err);
		return ret;
	}

	con->umc_ecc_log.de_queried_count++;

	memset(page_pfn, 0, sizeof(page_pfn));
	count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
			pa_addr,
			page_pfn, ARRAY_SIZE(page_pfn));
	if (count <= 0) {
		dev_warn(adev->dev, "Failed to convert error address! count:%d\n", count);
		return 0;
	}

	/* Reserve memory */
	for (i = 0; i < count; i++)
		amdgpu_ras_reserve_page(adev, page_pfn[i]);

	/* The problem case is as follows:
	 * 1. GPU A triggers a gpu ras reset, and GPU A drives
	 *    GPU B to also perform a gpu ras reset.
	 * 2. After GPU B's ras reset has started, GPU B queries a DE
	 *    entry. Since the DE entry was queried in the ras reset
	 *    thread instead of the page retirement thread, the bad
	 *    page retirement work is not triggered. Even after all
	 *    gpu resets have completed, the bad pages stay cached in
	 *    RAM until GPU B's bad page retirement work runs again
	 *    and saves them to eeprom.
	 * Trigger the delayed work to save the bad pages to eeprom in time
	 * after the gpu ras reset has completed.
	 */
	if (amdgpu_ras_in_recovery(adev))
		schedule_delayed_work(&con->page_retirement_dwork,
			msecs_to_jiffies(DELAYED_TIME_FOR_GPU_RESET));

	return 0;
}

static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
				struct ras_ecc_err *ecc_err, void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
	int ret = 0, i, count;

	if (!err_data || !ecc_err)
		return -EINVAL;

	memset(page_pfn, 0, sizeof(page_pfn));
	count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
			ecc_err->pa_pfn << AMDGPU_GPU_PAGE_SHIFT,
			page_pfn, ARRAY_SIZE(page_pfn));

	for (i = 0; i < count; i++) {
		ret = amdgpu_umc_fill_error_record(err_data,
				ecc_err->addr,
				page_pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
				MCA_IPID_2_UMC_CH(ecc_err->ipid),
				MCA_IPID_2_UMC_INST(ecc_err->ipid));
		if (ret)
			break;
	}

	err_data->de_count++;

	return ret;
}

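/*
 * Walk the deferred-error radix tree and turn every newly detected entry
 * into error records, clearing UMC_ECC_NEW_DETECTED_TAG once an entry has
 * been consumed.
 */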
static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
					void *ras_error_status)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_ecc_err *entries[MAX_ECC_NUM_PER_RETIREMENT];
	struct radix_tree_root *ecc_tree;
	int new_detected, ret, i;

	ecc_tree = &con->umc_ecc_log.de_page_tree;

	mutex_lock(&con->umc_ecc_log.lock);
	new_detected = radix_tree_gang_lookup_tag(ecc_tree, (void **)entries,
			0, ARRAY_SIZE(entries), UMC_ECC_NEW_DETECTED_TAG);
	for (i = 0; i < new_detected; i++) {
		if (!entries[i])
			continue;

		ret = umc_v12_0_fill_error_record(adev, entries[i], ras_error_status);
		if (ret) {
			dev_err(adev->dev, "Failed to fill umc error record, ret:%d\n", ret);
			break;
		}
		radix_tree_tag_clear(ecc_tree,
				entries[i]->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
	}
	mutex_unlock(&con->umc_ecc_log.lock);
}

struct amdgpu_umc_ras umc_v12_0_ras = {
	.ras_block = {
		.hw_ops = &umc_v12_0_ras_hw_ops,
		.ras_late_init = umc_v12_0_ras_late_init,
	},
	.err_cnt_init = umc_v12_0_err_cnt_init,
	.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
	.ecc_info_query_ras_error_address = umc_v12_0_query_ras_ecc_err_addr,
	.check_ecc_err_status = umc_v12_0_check_ecc_err_status,
	.update_ecc_status = umc_v12_0_update_ecc_status,
};