/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "umc_v12_0.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "umc/umc_12_0_0_offset.h"
#include "umc/umc_12_0_0_sh_mask.h"
#include "mp/mp_13_0_6_sh_mask.h"

#define MAX_ECC_NUM_PER_RETIREMENT 32

static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev,
					    uint32_t node_inst,
					    uint32_t umc_inst,
					    uint32_t ch_inst)
{
	uint32_t index = umc_inst * adev->umc.channel_inst_num + ch_inst;
	uint64_t cross_node_offset = (node_inst == 0) ? 0 : UMC_V12_0_CROSS_NODE_OFFSET;

	umc_inst = index / 4;
	ch_inst = index % 4;

	return adev->umc.channel_offs * ch_inst + UMC_V12_0_INST_DIST * umc_inst +
		UMC_V12_0_NODE_DIST * node_inst + cross_node_offset;
}

static int umc_v12_0_reset_error_count_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint64_t odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	/* clear error count */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V12_0_CE_CNT_INIT);

	return 0;
}

static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_reset_error_count_per_channel, NULL);
}

bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	dev_dbg(adev->dev,
		"MCA_UMC_STATUS(0x%llx): Val:%llu, Poison:%llu, Deferred:%llu, PCC:%llu, UC:%llu, TCC:%llu\n",
		mc_umc_status,
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC)
	);

	return (amdgpu_ras_is_poison_mode_supported(adev) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1));
}
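/*
 * Classify an MCA_UMC_STATUS value as an uncorrectable error: the bank must
 * be valid and flag PCC, UC or TCC. Deferred errors are excluded first, as
 * they are reported separately through the deferred-error path.
 */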
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
}

bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0) ||
		/* Identify data parity error in replay mode */
		((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) &&
		!(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
}

static void umc_v12_0_query_error_count_per_type(struct amdgpu_device *adev,
						   uint64_t umc_reg_offset,
						   unsigned long *error_count,
						   check_error_type_func error_type_func)
{
	uint64_t mc_umc_status;
	uint64_t mc_umc_status_addr;

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	/* Check MCUMC_STATUS */
	mc_umc_status =
		RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (error_type_func(adev, mc_umc_status))
		*error_count += 1;
}

static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	unsigned long ue_count = 0, ce_count = 0, de_count = 0;

	/* NOTE: node_inst is converted by adev->umc.active_mask and the range is [0-3],
	 * which can be used as die ID directly */
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = node_inst,
	};

	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					    &ce_count, umc_v12_0_is_correctable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					    &ue_count, umc_v12_0_is_uncorrectable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					    &de_count, umc_v12_0_is_deferred_error);

	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
	amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, de_count);

	return 0;
}

static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
					   void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_count, ras_error_status);

	umc_v12_0_reset_error_count(adev);
}
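/*
 * Translate an MCA error address into a soc physical address via the RAS TA,
 * then expand it over all [C4 C3 C2] column combinations and both R13
 * settings so every potentially affected physical page is recorded in
 * err_data for retirement.
 */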
static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
					    struct ras_err_data *err_data,
					    struct ta_ras_query_address_input *addr_in)
{
	uint32_t col, row, row_xor, bank, channel_index;
	uint64_t soc_pa, retired_page, column, err_addr;
	struct ta_ras_query_address_output addr_out;

	err_addr = addr_in->ma.err_addr;
	addr_in->addr_type = TA_RAS_MCA_TO_PA;
	if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
		dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
			err_addr);

		return;
	}

	soc_pa = addr_out.pa.pa;
	bank = addr_out.pa.bank;
	channel_index = addr_out.pa.channel_idx;

	col = (err_addr >> 1) & 0x1fULL;
	row = (err_addr >> 10) & 0x3fffULL;
	row_xor = row ^ (0x1ULL << 13);
	/* clear [C3 C2] in soc physical address */
	soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
	/* clear [C4] in soc physical address */
	soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);

	/* loop for all possibilities of [C4 C3 C2] */
	for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
		retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
		retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
		/* include column bit 0 and 1 */
		col &= 0x3;
		col |= (column << 2);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row, col, bank, channel_index);
		amdgpu_umc_fill_error_record(err_data, err_addr,
			retired_page, channel_index, addr_in->ma.umc_inst);

		/* shift R13 bit */
		retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row_xor, col, bank, channel_index);
		amdgpu_umc_fill_error_record(err_data, err_addr,
			retired_page, channel_index, addr_in->ma.umc_inst);
	}
}

static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
				struct ta_ras_query_address_input *addr_in,
				uint64_t *pfns, int len)
{
	uint32_t col, row, row_xor, bank, channel_index;
	uint64_t soc_pa, retired_page, column, err_addr;
	struct ta_ras_query_address_output addr_out;
	uint32_t pos = 0;

	err_addr = addr_in->ma.err_addr;
	addr_in->addr_type = TA_RAS_MCA_TO_PA;
	if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
		dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
			err_addr);
		return 0;
	}

	soc_pa = addr_out.pa.pa;
	bank = addr_out.pa.bank;
	channel_index = addr_out.pa.channel_idx;

	col = (err_addr >> 1) & 0x1fULL;
	row = (err_addr >> 10) & 0x3fffULL;
	row_xor = row ^ (0x1ULL << 13);
	/* clear [C3 C2] in soc physical address */
	soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
	/* clear [C4] in soc physical address */
	soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);

	/* loop for all possibilities of [C4 C3 C2] */
	for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
		retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
		retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);

		if (pos >= len)
			return 0;
		pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;

		/* include column bit 0 and 1 */
		col &= 0x3;
		col |= (column << 2);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row, col, bank, channel_index);

		/* shift R13 bit */
		retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);

		if (pos >= len)
			return 0;
		pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;

		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row_xor, col, bank, channel_index);
	}

	return pos;
}
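/*
 * Per-channel callback for amdgpu_umc_loop_channels(): read MCUMC_STATUS and,
 * if an uncorrectable or deferred error is latched, decode MCUMC_ADDR and
 * record the affected physical pages, then clear the status register.
 */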
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	struct ta_ras_query_address_input addr_in;
	uint64_t mc_umc_status_addr;
	uint64_t mc_umc_status, err_addr;
	uint64_t mc_umc_addrt0;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (mc_umc_status == 0)
		return 0;

	if (!err_data->err_addr) {
		/* clear umc status */
		WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

		return 0;
	}

	/* calculate error address if ue error is detected */
	if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) ||
	    umc_v12_0_is_deferred_error(adev, mc_umc_status)) {
		mc_umc_addrt0 =
			SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);

		err_addr = RREG64_PCIE_EXT((mc_umc_addrt0 + umc_reg_offset) * 4);

		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

		if (!adev->aid_mask &&
		    adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id)
			addr_in.ma.socket_id = adev->smuio.funcs->get_socket_id(adev);
		else
			addr_in.ma.socket_id = 0;

		addr_in.ma.err_addr = err_addr;
		addr_in.ma.ch_inst = ch_inst;
		addr_in.ma.umc_inst = umc_inst;
		addr_in.ma.node_inst = node_inst;

		umc_v12_0_convert_error_address(adev, err_data, &addr_in);
	}

	/* clear umc status */
	WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

	return 0;
}

static void umc_v12_0_query_ras_error_address(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_address, ras_error_status);
}

static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint32_t odecc_cnt_sel;
	uint64_t odecc_cnt_sel_addr, odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_cnt_sel_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccCntSel);
	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	odecc_cnt_sel = RREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4);

	/* set ce error interrupt type to APIC based interrupt */
	odecc_cnt_sel = REG_SET_FIELD(odecc_cnt_sel, UMCCH0_OdEccCntSel,
					OdEccErrInt, 0x1);
	WREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4, odecc_cnt_sel);

	/* set error count to initial value */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V12_0_CE_CNT_INIT);

	return 0;
}
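/*
 * Map an amdgpu_mca_error_type onto the corresponding MCA_UMC_STATUS
 * classification helper above; unknown types are treated as "no error".
 */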
static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev,
			enum amdgpu_mca_error_type type, void *ras_error_status)
{
	uint64_t mc_umc_status = *(uint64_t *)ras_error_status;

	switch (type) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
		return umc_v12_0_is_uncorrectable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_CE:
		return umc_v12_0_is_correctable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_DE:
		return umc_v12_0_is_deferred_error(adev, mc_umc_status);
	default:
		return false;
	}

	return false;
}

static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_err_cnt_init_per_channel, NULL);
}

static bool umc_v12_0_query_ras_poison_mode(struct amdgpu_device *adev)
{
	/*
	 * Force return true, because regUMCCH0_EccCtrl
	 * is not accessible from host side
	 */
	return true;
}

const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = {
	.query_ras_error_count = umc_v12_0_query_ras_error_count,
	.query_ras_error_address = umc_v12_0_query_ras_error_address,
};

static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				     enum aca_smu_type type, void *data)
{
	struct amdgpu_device *adev = handle->adev;
	struct aca_bank_info info;
	enum aca_error_type err_type;
	u64 status, count;
	u32 ext_error_code;
	int ret;

	status = bank->regs[ACA_REG_IDX_STATUS];
	if (umc_v12_0_is_deferred_error(adev, status))
		err_type = ACA_ERROR_TYPE_DEFERRED;
	else if (umc_v12_0_is_uncorrectable_error(adev, status))
		err_type = ACA_ERROR_TYPE_UE;
	else if (umc_v12_0_is_correctable_error(adev, status))
		err_type = ACA_ERROR_TYPE_CE;
	else
		return 0;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	amdgpu_umc_update_ecc_status(adev,
		bank->regs[ACA_REG_IDX_STATUS],
		bank->regs[ACA_REG_IDX_IPID],
		bank->regs[ACA_REG_IDX_ADDR]);

	ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
	count = ext_error_code == 0 ?
		ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;

	return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}

static const struct aca_bank_ops umc_v12_0_aca_bank_ops = {
	.aca_bank_parser = umc_v12_0_aca_bank_parser,
};

const struct aca_info umc_v12_0_aca_info = {
	.hwip = ACA_HWIP_TYPE_UMC,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK | ACA_ERROR_DEFERRED_MASK,
	.bank_ops = &umc_v12_0_aca_bank_ops,
};

static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int ret;

	ret = amdgpu_umc_ras_late_init(adev, ras_block);
	if (ret)
		return ret;

	ret = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__UMC,
				  &umc_v12_0_aca_info, NULL);
	if (ret)
		return ret;

	return 0;
}
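/*
 * Record a deferred (poisoned) UMC error reported through ACA: validate the
 * IPID against the UMC v12.0 hardware/MCA type IDs, convert the MCA error
 * address into the set of affected page frames, and log them in the
 * deferred-error page tree for later retirement.
 */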
static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
			uint64_t status, uint64_t ipid, uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	uint16_t hwid, mcatype;
	struct ta_ras_query_address_input addr_in;
	uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
	uint64_t err_addr, hash_val = 0;
	struct ras_ecc_err *ecc_err;
	int count;
	int ret;

	hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
	mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);

	if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
		return 0;

	if (!status)
		return 0;

	if (!umc_v12_0_is_deferred_error(adev, status))
		return 0;

	err_addr = REG_GET_FIELD(addr,
				MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

	dev_dbg(adev->dev,
		"UMC:IPID:0x%llx, socket:%llu, aid:%llu, inst:%llu, ch:%llu, err_addr:0x%llx\n",
		ipid,
		MCA_IPID_2_SOCKET_ID(ipid),
		MCA_IPID_2_DIE_ID(ipid),
		MCA_IPID_2_UMC_INST(ipid),
		MCA_IPID_2_UMC_CH(ipid),
		err_addr);

	memset(page_pfn, 0, sizeof(page_pfn));

	memset(&addr_in, 0, sizeof(addr_in));
	addr_in.ma.err_addr = err_addr;
	addr_in.ma.ch_inst = MCA_IPID_2_UMC_CH(ipid);
	addr_in.ma.umc_inst = MCA_IPID_2_UMC_INST(ipid);
	addr_in.ma.node_inst = MCA_IPID_2_DIE_ID(ipid);
	addr_in.ma.socket_id = MCA_IPID_2_SOCKET_ID(ipid);

	count = umc_v12_0_convert_err_addr(adev,
				&addr_in, page_pfn, ARRAY_SIZE(page_pfn));
	if (count <= 0) {
		dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count);
		return 0;
	}

	ret = amdgpu_umc_build_pages_hash(adev,
			page_pfn, count, &hash_val);
	if (ret) {
		dev_err(adev->dev, "Fail to build error pages hash\n");
		return ret;
	}

	ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL);
	if (!ecc_err)
		return -ENOMEM;

	ecc_err->err_pages.pfn = kcalloc(count, sizeof(*ecc_err->err_pages.pfn), GFP_KERNEL);
	if (!ecc_err->err_pages.pfn) {
		kfree(ecc_err);
		return -ENOMEM;
	}

	memcpy(ecc_err->err_pages.pfn, page_pfn, count * sizeof(*ecc_err->err_pages.pfn));
	ecc_err->err_pages.count = count;

	ecc_err->hash_index = hash_val;
	ecc_err->status = status;
	ecc_err->ipid = ipid;
	ecc_err->addr = addr;

	ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
	if (ret) {
		if (ret == -EEXIST)
			con->umc_ecc_log.de_updated = true;
		else
			dev_err(adev->dev, "Fail to log ecc error! ret:%d\n", ret);

		kfree(ecc_err->err_pages.pfn);
		kfree(ecc_err);
		return ret;
	}

	con->umc_ecc_log.de_updated = true;

	return 0;
}

static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
				struct ras_ecc_err *ecc_err, void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint32_t i = 0;
	int ret = 0;

	if (!err_data || !ecc_err)
		return -EINVAL;

	for (i = 0; i < ecc_err->err_pages.count; i++) {
		ret = amdgpu_umc_fill_error_record(err_data,
				ecc_err->addr,
				ecc_err->err_pages.pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
				MCA_IPID_2_UMC_CH(ecc_err->ipid),
				MCA_IPID_2_UMC_INST(ecc_err->ipid));
		if (ret)
			break;
	}

	err_data->de_count++;

	return ret;
}

static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
					void *ras_error_status)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_ecc_err *entries[MAX_ECC_NUM_PER_RETIREMENT];
	struct radix_tree_root *ecc_tree;
	int new_detected, ret, i;

	ecc_tree = &con->umc_ecc_log.de_page_tree;

	mutex_lock(&con->umc_ecc_log.lock);
	new_detected = radix_tree_gang_lookup_tag(ecc_tree, (void **)entries,
			0, ARRAY_SIZE(entries), UMC_ECC_NEW_DETECTED_TAG);
	for (i = 0; i < new_detected; i++) {
		if (!entries[i])
			continue;

		ret = umc_v12_0_fill_error_record(adev, entries[i], ras_error_status);
		if (ret) {
			dev_err(adev->dev, "Fail to fill umc error record, ret:%d\n", ret);
			break;
		}
		radix_tree_tag_clear(ecc_tree, entries[i]->hash_index, UMC_ECC_NEW_DETECTED_TAG);
	}
	mutex_unlock(&con->umc_ecc_log.lock);
}
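/*
 * Top-level UMC v12.0 RAS descriptor: wires the hardware ops and the
 * ECC/poison callbacks above into the common amdgpu UMC RAS framework.
 */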
struct amdgpu_umc_ras umc_v12_0_ras = {
	.ras_block = {
		.hw_ops = &umc_v12_0_ras_hw_ops,
		.ras_late_init = umc_v12_0_ras_late_init,
	},
	.err_cnt_init = umc_v12_0_err_cnt_init,
	.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
	.ecc_info_query_ras_error_address = umc_v12_0_query_ras_ecc_err_addr,
	.check_ecc_err_status = umc_v12_0_check_ecc_err_status,
	.update_ecc_status = umc_v12_0_update_ecc_status,
};