/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "umc_v12_0.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "umc/umc_12_0_0_offset.h"
#include "umc/umc_12_0_0_sh_mask.h"
#include "mp/mp_13_0_6_sh_mask.h"

#define MAX_ECC_NUM_PER_RETIREMENT 32
#define DELAYED_TIME_FOR_GPU_RESET 1000 //ms

static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev,
					    uint32_t node_inst,
					    uint32_t umc_inst,
					    uint32_t ch_inst)
{
	uint32_t index = umc_inst * adev->umc.channel_inst_num + ch_inst;
	uint64_t cross_node_offset = (node_inst == 0) ? 0 : UMC_V12_0_CROSS_NODE_OFFSET;

	umc_inst = index / 4;
	ch_inst = index % 4;

	return adev->umc.channel_offs * ch_inst + UMC_V12_0_INST_DIST * umc_inst +
		UMC_V12_0_NODE_DIST * node_inst + cross_node_offset;
}

static int umc_v12_0_reset_error_count_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint64_t odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	/* clear error count */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V12_0_CE_CNT_INIT);

	return 0;
}

static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_reset_error_count_per_channel, NULL);
}

bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	dev_dbg(adev->dev,
		"MCA_UMC_STATUS(0x%llx): Val:%llu, Poison:%llu, Deferred:%llu, PCC:%llu, UC:%llu, TCC:%llu\n",
		mc_umc_status,
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC)
	);

	return (amdgpu_ras_is_poison_mode_supported(adev) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1));
}

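/*
 * An error is treated as uncorrectable only when the bank is valid and
 * PCC, UC or TCC is set, and it does not already qualify as a deferred
 * (poison) error.
 */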
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
}

bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0) ||
		/* Identify data parity error in replay mode */
		((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) &&
		!(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
}

static void umc_v12_0_query_error_count_per_type(struct amdgpu_device *adev,
						uint64_t umc_reg_offset,
						unsigned long *error_count,
						check_error_type_func error_type_func)
{
	uint64_t mc_umc_status;
	uint64_t mc_umc_status_addr;

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	/* Check MCUMC_STATUS */
	mc_umc_status =
		RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (error_type_func(adev, mc_umc_status))
		*error_count += 1;
}

static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	unsigned long ue_count = 0, ce_count = 0, de_count = 0;

	/* NOTE: node_inst is converted by adev->umc.active_mask and the range is [0-3],
	 * which can be used as die ID directly */
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = node_inst,
	};

	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					&ce_count, umc_v12_0_is_correctable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					&ue_count, umc_v12_0_is_uncorrectable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					&de_count, umc_v12_0_is_deferred_error);

	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
	amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, de_count);

	return 0;
}

static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_count, ras_error_status);

	umc_v12_0_reset_error_count(adev);
}

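/*
 * Convert an MCA error address into the SoC physical addresses of every
 * page that has to be retired: the RAS TA translates MCA to PA, the
 * NPS-dependent loop bits (column/row or channel/bank/row) are cleared
 * and then iterated to enumerate all aliased pages, and each result is
 * optionally printed and/or recorded in err_data.
 */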
static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
					struct ras_err_data *err_data,
					struct ta_ras_query_address_input *addr_in,
					struct ta_ras_query_address_output *addr_out,
					bool dump_addr)
{
	uint32_t col, col_lower, row, row_lower, bank;
	uint32_t channel_index = 0, umc_inst = 0;
	uint32_t i, loop_bits[UMC_V12_0_RETIRE_LOOP_BITS];
	uint64_t soc_pa, column, err_addr;
	struct ta_ras_query_address_output addr_out_tmp;
	struct ta_ras_query_address_output *paddr_out;
	enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
	int ret = 0;

	if (!addr_out)
		paddr_out = &addr_out_tmp;
	else
		paddr_out = addr_out;

	err_addr = bank = 0;
	if (addr_in) {
		err_addr = addr_in->ma.err_addr;
		addr_in->addr_type = TA_RAS_MCA_TO_PA;
		ret = psp_ras_query_address(&adev->psp, addr_in, paddr_out);
		if (ret) {
			dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
				err_addr);

			goto out;
		}

		bank = paddr_out->pa.bank;
		/* no need to care about umc inst if addr_in is NULL */
		umc_inst = addr_in->ma.umc_inst;
	}

	loop_bits[0] = UMC_V12_0_PA_C2_BIT;
	loop_bits[1] = UMC_V12_0_PA_C3_BIT;
	loop_bits[2] = UMC_V12_0_PA_C4_BIT;
	loop_bits[3] = UMC_V12_0_PA_R13_BIT;

	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);

	/* other nps modes are taken as nps1 */
	if (nps == AMDGPU_NPS4_PARTITION_MODE) {
		loop_bits[0] = UMC_V12_0_PA_CH4_BIT;
		loop_bits[1] = UMC_V12_0_PA_CH5_BIT;
		loop_bits[2] = UMC_V12_0_PA_B0_BIT;
		loop_bits[3] = UMC_V12_0_PA_R11_BIT;
	}

	soc_pa = paddr_out->pa.pa;
	channel_index = paddr_out->pa.channel_idx;
	/* clear loop bits in soc physical address */
	for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
		soc_pa &= ~BIT_ULL(loop_bits[i]);

	paddr_out->pa.pa = soc_pa;
	/* get column bit 0 and 1 in mca address */
	col_lower = (err_addr >> 1) & 0x3ULL;
	/* MA_R13_BIT will be handled later */
	row_lower = (err_addr >> UMC_V12_0_MA_R0_BIT) & 0x1fffULL;

	if (!err_data && !dump_addr)
		goto out;

	/* loop for all possibilities of retired bits */
	for (column = 0; column < UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; column++) {
		soc_pa = paddr_out->pa.pa;
		for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
			soc_pa |= (((column >> i) & 0x1ULL) << loop_bits[i]);

		col = ((column & 0x7) << 2) | col_lower;
		/* add row bit 13 */
		row = ((column >> 3) << 13) | row_lower;

		if (dump_addr)
			dev_info(adev->dev,
				"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
				soc_pa, row, col, bank, channel_index);

		if (err_data)
			amdgpu_umc_fill_error_record(err_data, err_addr,
				soc_pa, channel_index, umc_inst);
	}

out:
	return ret;
}

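/*
 * Per-channel callback for query_ras_error_address: read MCUMC_STATUST0,
 * convert the logged MCA address into retired pages when an uncorrectable
 * or deferred error is present, then clear the status register.
 */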
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	struct ta_ras_query_address_input addr_in;
	uint64_t mc_umc_status_addr;
	uint64_t mc_umc_status, err_addr;
	uint64_t mc_umc_addrt0;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (mc_umc_status == 0)
		return 0;

	if (!err_data->err_addr) {
		/* clear umc status */
		WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

		return 0;
	}

	/* calculate error address if ue error is detected */
	if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) ||
	    umc_v12_0_is_deferred_error(adev, mc_umc_status)) {
		mc_umc_addrt0 =
			SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);

		err_addr = RREG64_PCIE_EXT((mc_umc_addrt0 + umc_reg_offset) * 4);

		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

		if (!adev->aid_mask &&
		    adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id)
			addr_in.ma.socket_id = adev->smuio.funcs->get_socket_id(adev);
		else
			addr_in.ma.socket_id = 0;

		addr_in.ma.err_addr = err_addr;
		addr_in.ma.ch_inst = ch_inst;
		addr_in.ma.umc_inst = umc_inst;
		addr_in.ma.node_inst = node_inst;

		umc_v12_0_convert_error_address(adev, err_data, &addr_in, NULL, true);
	}

	/* clear umc status */
	WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

	return 0;
}

static void umc_v12_0_query_ras_error_address(struct amdgpu_device *adev,
					void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_address, ras_error_status);
}

static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint32_t odecc_cnt_sel;
	uint64_t odecc_cnt_sel_addr, odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_cnt_sel_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccCntSel);
	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	odecc_cnt_sel = RREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4);

	/* set ce error interrupt type to APIC based interrupt */
	odecc_cnt_sel = REG_SET_FIELD(odecc_cnt_sel, UMCCH0_OdEccCntSel,
					OdEccErrInt, 0x1);
	WREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4, odecc_cnt_sel);

	/* set error count to initial value */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V12_0_CE_CNT_INIT);

	return 0;
}

static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev,
			enum amdgpu_mca_error_type type, void *ras_error_status)
{
	uint64_t mc_umc_status = *(uint64_t *)ras_error_status;

	switch (type) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
		return umc_v12_0_is_uncorrectable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_CE:
		return umc_v12_0_is_correctable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_DE:
		return umc_v12_0_is_deferred_error(adev, mc_umc_status);
	default:
		return false;
	}

	return false;
}

static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_err_cnt_init_per_channel, NULL);
}

static bool umc_v12_0_query_ras_poison_mode(struct amdgpu_device *adev)
{
	/*
	 * Force return true, because regUMCCH0_EccCtrl
	 * is not accessible from host side
	 */
	return true;
}

const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = {
	.query_ras_error_count = umc_v12_0_query_ras_error_count,
	.query_ras_error_address = umc_v12_0_query_ras_error_address,
};

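/*
 * ACA bank parser: classify the bank status as deferred, uncorrectable or
 * correctable, forward it to the UMC ECC bookkeeping, and log the error
 * count (MISC0 ERRCNT when ErrorCodeExt is zero, otherwise one).
 */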
static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				enum aca_smu_type type, void *data)
{
	struct amdgpu_device *adev = handle->adev;
	struct aca_bank_info info;
	enum aca_error_type err_type;
	u64 status, count;
	u32 ext_error_code;
	int ret;

	status = bank->regs[ACA_REG_IDX_STATUS];
	if (umc_v12_0_is_deferred_error(adev, status))
		err_type = ACA_ERROR_TYPE_DEFERRED;
	else if (umc_v12_0_is_uncorrectable_error(adev, status))
		err_type = ACA_ERROR_TYPE_UE;
	else if (umc_v12_0_is_correctable_error(adev, status))
		err_type = ACA_ERROR_TYPE_CE;
	else
		return 0;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	amdgpu_umc_update_ecc_status(adev,
		bank->regs[ACA_REG_IDX_STATUS],
		bank->regs[ACA_REG_IDX_IPID],
		bank->regs[ACA_REG_IDX_ADDR]);

	ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
	count = ext_error_code == 0 ?
		ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;

	return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}

static const struct aca_bank_ops umc_v12_0_aca_bank_ops = {
	.aca_bank_parser = umc_v12_0_aca_bank_parser,
};

const struct aca_info umc_v12_0_aca_info = {
	.hwip = ACA_HWIP_TYPE_UMC,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK | ACA_ERROR_DEFERRED_MASK,
	.bank_ops = &umc_v12_0_aca_bank_ops,
};

static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int ret;

	ret = amdgpu_umc_ras_late_init(adev, ras_block);
	if (ret)
		return ret;

	ret = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__UMC,
				  &umc_v12_0_aca_info, NULL);
	if (ret)
		return ret;

	return 0;
}

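/*
 * Record a deferred (poison) error reported through the ACA path: convert
 * the MCA address to a SoC physical address, cache it in the de_page_tree,
 * reserve every aliased page, and schedule the delayed page-retirement work
 * if a RAS reset is in progress so the bad pages still reach the eeprom
 * once the reset completes.
 */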
static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
			uint64_t status, uint64_t ipid, uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	uint16_t hwid, mcatype;
	uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
	uint64_t err_addr, pa_addr = 0;
	struct ras_ecc_err *ecc_err;
	struct ta_ras_query_address_output addr_out;
	enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
	uint32_t shift_bit = UMC_V12_0_PA_C4_BIT;
	int count, ret, i;

	hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
	mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);

	if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
		return 0;

	if (!status)
		return 0;

	if (!umc_v12_0_is_deferred_error(adev, status))
		return 0;

	err_addr = REG_GET_FIELD(addr,
			MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

	dev_dbg(adev->dev,
		"UMC:IPID:0x%llx, socket:%llu, aid:%llu, inst:%llu, ch:%llu, err_addr:0x%llx\n",
		ipid,
		MCA_IPID_2_SOCKET_ID(ipid),
		MCA_IPID_2_DIE_ID(ipid),
		MCA_IPID_2_UMC_INST(ipid),
		MCA_IPID_2_UMC_CH(ipid),
		err_addr);

	ret = amdgpu_umc_mca_to_addr(adev,
			err_addr, MCA_IPID_2_UMC_CH(ipid),
			MCA_IPID_2_UMC_INST(ipid), MCA_IPID_2_DIE_ID(ipid),
			MCA_IPID_2_SOCKET_ID(ipid), &addr_out, true);
	if (ret)
		return ret;

	ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL);
	if (!ecc_err)
		return -ENOMEM;

	pa_addr = addr_out.pa.pa;
	ecc_err->status = status;
	ecc_err->ipid = ipid;
	ecc_err->addr = addr;
	ecc_err->pa_pfn = pa_addr >> AMDGPU_GPU_PAGE_SHIFT;
	ecc_err->channel_idx = addr_out.pa.channel_idx;

	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	if (nps == AMDGPU_NPS4_PARTITION_MODE)
		shift_bit = UMC_V12_0_PA_B0_BIT;

	/* If converted pa_pfn is 0, use pa C4 pfn. */
	if (!ecc_err->pa_pfn)
		ecc_err->pa_pfn = BIT_ULL(shift_bit) >> AMDGPU_GPU_PAGE_SHIFT;

	ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
	if (ret) {
		if (ret == -EEXIST)
			con->umc_ecc_log.de_queried_count++;
		else
			dev_err(adev->dev, "Fail to log ecc error! ret:%d\n", ret);

		kfree(ecc_err);
		return ret;
	}

	con->umc_ecc_log.de_queried_count++;

	memset(page_pfn, 0, sizeof(page_pfn));
	count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
			pa_addr,
			page_pfn, ARRAY_SIZE(page_pfn));
	if (count <= 0) {
		dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count);
		return 0;
	}

	/* Reserve memory */
	for (i = 0; i < count; i++)
		amdgpu_ras_reserve_page(adev, page_pfn[i]);

	/* The problem case is as follows:
	 * 1. GPU A triggers a gpu ras reset, and GPU A drives
	 *    GPU B to also perform a gpu ras reset.
	 * 2. After gpu B ras reset started, gpu B queried a DE
	 *    data. Since the DE data was queried in the ras reset
	 *    thread instead of the page retirement thread, bad
	 *    page retirement work would not be triggered. Then
	 *    even if all gpu resets are completed, the bad pages
	 *    will be cached in RAM until GPU B's bad page retirement
	 *    work is triggered again and then saved to eeprom.
	 * Trigger delayed work to save the bad pages to eeprom in time
	 * after gpu ras reset is completed.
	 */
	if (amdgpu_ras_in_recovery(adev))
		schedule_delayed_work(&con->page_retirement_dwork,
				msecs_to_jiffies(DELAYED_TIME_FOR_GPU_RESET));

	return 0;
}

static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
				struct ras_ecc_err *ecc_err, void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
	int ret = 0, i, count;

	if (!err_data || !ecc_err)
		return -EINVAL;

	memset(page_pfn, 0, sizeof(page_pfn));
	count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
			ecc_err->pa_pfn << AMDGPU_GPU_PAGE_SHIFT,
			page_pfn, ARRAY_SIZE(page_pfn));

	for (i = 0; i < count; i++) {
		ret = amdgpu_umc_fill_error_record(err_data,
				ecc_err->addr,
				page_pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
				ecc_err->channel_idx,
				MCA_IPID_2_UMC_INST(ecc_err->ipid));
		if (ret)
			break;
	}

	err_data->de_count++;

	return ret;
}

static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
					void *ras_error_status)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_ecc_err *entries[MAX_ECC_NUM_PER_RETIREMENT];
	struct radix_tree_root *ecc_tree;
	int new_detected, ret, i;

	ecc_tree = &con->umc_ecc_log.de_page_tree;

	mutex_lock(&con->umc_ecc_log.lock);
	new_detected = radix_tree_gang_lookup_tag(ecc_tree, (void **)entries,
			0, ARRAY_SIZE(entries), UMC_ECC_NEW_DETECTED_TAG);
	for (i = 0; i < new_detected; i++) {
		if (!entries[i])
			continue;

		ret = umc_v12_0_fill_error_record(adev, entries[i], ras_error_status);
		if (ret) {
			dev_err(adev->dev, "Fail to fill umc error record, ret:%d\n", ret);
			break;
		}
		radix_tree_tag_clear(ecc_tree,
				entries[i]->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
	}
	mutex_unlock(&con->umc_ecc_log.lock);
}

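/*
 * Derive the die id from a retired page address by XOR-folding a fixed set
 * of physical-address bits; PA_C4 and PA_R13 may already be cleared in
 * retired_page, so those two bits are taken from mca_addr instead. Only
 * NPS1 mode is handled here.
 */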
static uint32_t umc_v12_0_get_die_id(struct amdgpu_device *adev,
		uint64_t mca_addr, uint64_t retired_page)
{
	uint32_t die = 0;

	/* we only calculate die id for nps1 mode right now */
	die += ((((retired_page >> 12) & 0x1ULL) ^
		 ((retired_page >> 20) & 0x1ULL) ^
		 ((retired_page >> 27) & 0x1ULL) ^
		 ((retired_page >> 34) & 0x1ULL) ^
		 ((retired_page >> 41) & 0x1ULL)) << 0);

	/* the original PA_C4 and PA_R13 may be cleared in retired_page, so
	 * get them from mca_addr.
	 */
	die += ((((retired_page >> 13) & 0x1ULL) ^
		 ((mca_addr >> 5) & 0x1ULL) ^
		 ((retired_page >> 28) & 0x1ULL) ^
		 ((mca_addr >> 23) & 0x1ULL) ^
		 ((retired_page >> 42) & 0x1ULL)) << 1);
	die &= 3;

	return die;
}

struct amdgpu_umc_ras umc_v12_0_ras = {
	.ras_block = {
		.hw_ops = &umc_v12_0_ras_hw_ops,
		.ras_late_init = umc_v12_0_ras_late_init,
	},
	.err_cnt_init = umc_v12_0_err_cnt_init,
	.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
	.ecc_info_query_ras_error_address = umc_v12_0_query_ras_ecc_err_addr,
	.check_ecc_err_status = umc_v12_0_check_ecc_err_status,
	.update_ecc_status = umc_v12_0_update_ecc_status,
	.convert_ras_err_addr = umc_v12_0_convert_error_address,
	.get_die_id_from_pa = umc_v12_0_get_die_id,
};