/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "umc_v12_0.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "umc/umc_12_0_0_offset.h"
#include "umc/umc_12_0_0_sh_mask.h"
#include "mp/mp_13_0_6_sh_mask.h"

/*
 * Map a (node, umc, channel) instance triple to the register offset of the
 * corresponding UMC channel register block.
 */
static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev,
					    uint32_t node_inst,
					    uint32_t umc_inst,
					    uint32_t ch_inst)
{
	uint32_t index = umc_inst * adev->umc.channel_inst_num + ch_inst;
	uint64_t cross_node_offset = (node_inst == 0) ? 0 : UMC_V12_0_CROSS_NODE_OFFSET;

	umc_inst = index / 4;
	ch_inst = index % 4;

	return adev->umc.channel_offs * ch_inst + UMC_V12_0_INST_DIST * umc_inst +
		UMC_V12_0_NODE_DIST * node_inst + cross_node_offset;
}

static int umc_v12_0_reset_error_count_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint64_t odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	/* clear error count */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V12_0_CE_CNT_INIT);

	return 0;
}

static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_reset_error_count_per_channel, NULL);
}

bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	dev_info(adev->dev,
		"MCA_UMC_STATUS(0x%llx): Val:%llu, Poison:%llu, Deferred:%llu, PCC:%llu, UC:%llu, TCC:%llu\n",
		mc_umc_status,
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC)
	);

	return (amdgpu_ras_is_poison_mode_supported(adev) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1));
}

bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
}

bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0) ||
		/* Identify data parity error in replay mode */
		((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) &&
		!(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
}

static void umc_v12_0_query_error_count_per_type(struct amdgpu_device *adev,
					uint64_t umc_reg_offset,
					unsigned long *error_count,
					check_error_type_func error_type_func)
{
	uint64_t mc_umc_status;
	uint64_t mc_umc_status_addr;

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	/* Check MCUMC_STATUS */
	mc_umc_status =
		RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (error_type_func(adev, mc_umc_status))
		*error_count += 1;
}

static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	unsigned long ue_count = 0, ce_count = 0, de_count = 0;

	/* NOTE: node_inst is converted by adev->umc.active_mask and the range is [0-3],
	 * which can be used as die ID directly */
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = node_inst,
	};

	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					     &ce_count, umc_v12_0_is_correctable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					     &ue_count, umc_v12_0_is_uncorrectable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					     &de_count, umc_v12_0_is_deferred_error);

	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
	amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, de_count);

	return 0;
}

static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
					    void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_count, ras_error_status);

	umc_v12_0_reset_error_count(adev);
}

static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
					    struct ras_err_data *err_data,
					    struct ta_ras_query_address_input *addr_in)
{
	uint32_t col, row, row_xor, bank, channel_index;
	uint64_t soc_pa, retired_page, column, err_addr;
	struct ta_ras_query_address_output addr_out;

	err_addr = addr_in->ma.err_addr;
	addr_in->addr_type = TA_RAS_MCA_TO_PA;
	if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
		dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
			err_addr);

		return;
	}

	soc_pa = addr_out.pa.pa;
	bank = addr_out.pa.bank;
	channel_index = addr_out.pa.channel_idx;

	col = (err_addr >> 1) & 0x1fULL;
	row = (err_addr >> 10) & 0x3fffULL;
	row_xor = row ^ (0x1ULL << 13);
	/* clear [C3 C2] in soc physical address */
	soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
	/* clear [C4] in soc physical address */
	soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);

	/* loop for all possibilities of [C4 C3 C2] */
	for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
		retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
		retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
		/* include column bit 0 and 1 */
		col &= 0x3;
		col |= (column << 2);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row, col, bank, channel_index);
		amdgpu_umc_fill_error_record(err_data, err_addr,
			retired_page, channel_index, addr_in->ma.umc_inst);

		/* shift R13 bit */
		retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row_xor, col, bank, channel_index);
		amdgpu_umc_fill_error_record(err_data, err_addr,
			retired_page, channel_index, addr_in->ma.umc_inst);
	}
}

static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	struct ta_ras_query_address_input addr_in;
	uint64_t mc_umc_status_addr;
	uint64_t mc_umc_status, err_addr;
	uint64_t mc_umc_addrt0;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (mc_umc_status == 0)
		return 0;

	if (!err_data->err_addr) {
		/* clear umc status */
		WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

		return 0;
	}

	/* calculate error address if ue or deferred error is detected */
	if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) ||
	    umc_v12_0_is_deferred_error(adev, mc_umc_status)) {
		mc_umc_addrt0 =
			SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);

		err_addr = RREG64_PCIE_EXT((mc_umc_addrt0 + umc_reg_offset) * 4);

		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

		if (!adev->aid_mask &&
		    adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id)
			addr_in.ma.socket_id = adev->smuio.funcs->get_socket_id(adev);
		else
			addr_in.ma.socket_id = 0;

		addr_in.ma.err_addr = err_addr;
		addr_in.ma.ch_inst = ch_inst;
		addr_in.ma.umc_inst = umc_inst;
		addr_in.ma.node_inst = node_inst;

		umc_v12_0_convert_error_address(adev, err_data, &addr_in);
	}

	/* clear umc status */
	WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

	return 0;
}

static void umc_v12_0_query_ras_error_address(struct amdgpu_device *adev,
					      void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_address, ras_error_status);
}

static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint32_t odecc_cnt_sel;
	uint64_t odecc_cnt_sel_addr, odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_cnt_sel_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccCntSel);
	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	odecc_cnt_sel = RREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4);

	/* set ce error interrupt type to APIC based interrupt */
	odecc_cnt_sel = REG_SET_FIELD(odecc_cnt_sel, UMCCH0_OdEccCntSel,
					OdEccErrInt, 0x1);
	WREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4, odecc_cnt_sel);

	/* set error count to initial value */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V12_0_CE_CNT_INIT);

	return 0;
}

static void umc_v12_0_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
						      void *ras_error_status)
{
	struct ras_query_context qctx;

	memset(&qctx, 0, sizeof(qctx));
	qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ?
						    RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID);

	amdgpu_mca_smu_log_ras_error(adev,
		AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status, &qctx);
	amdgpu_mca_smu_log_ras_error(adev,
		AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status, &qctx);
}

static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
							void *ras_error_status)
{
	struct ras_err_node *err_node;
	uint64_t mc_umc_status;
	struct ras_err_info *err_info;
	struct ras_err_addr *mca_err_addr, *tmp;
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct ta_ras_query_address_input addr_in;

	for_each_ras_error(err_node, err_data) {
		err_info = &err_node->err_info;
		if (list_empty(&err_info->err_addr_list))
			continue;

		addr_in.ma.node_inst = err_info->mcm_info.die_id;
		addr_in.ma.socket_id = err_info->mcm_info.socket_id;

		list_for_each_entry_safe(mca_err_addr, tmp, &err_info->err_addr_list, node) {
			mc_umc_status = mca_err_addr->err_status;
			if (mc_umc_status &&
			    (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) ||
			     umc_v12_0_is_deferred_error(adev, mc_umc_status))) {
				uint64_t mca_addr, err_addr, mca_ipid;
				uint32_t InstanceIdLo;

				mca_addr = mca_err_addr->err_addr;
				mca_ipid = mca_err_addr->err_ipid;

				err_addr = REG_GET_FIELD(mca_addr,
						MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
				InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);

				addr_in.ma.err_addr = err_addr;
				addr_in.ma.ch_inst = MCA_IPID_LO_2_UMC_CH(InstanceIdLo);
				addr_in.ma.umc_inst = MCA_IPID_LO_2_UMC_INST(InstanceIdLo);

				dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
					mca_ipid,
					err_info->mcm_info.die_id,
					MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
					MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
					err_addr);

				umc_v12_0_convert_error_address(adev,
					err_data, &addr_in);
			}

			/* Delete error address node from list and free memory */
			amdgpu_ras_del_mca_err_addr(err_info, mca_err_addr);
		}
	}
}

static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev,
			enum amdgpu_mca_error_type type, void *ras_error_status)
{
	uint64_t mc_umc_status = *(uint64_t *)ras_error_status;

	switch (type) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
		return umc_v12_0_is_uncorrectable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_CE:
		return umc_v12_0_is_correctable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_DE:
		return umc_v12_0_is_deferred_error(adev, mc_umc_status);
	default:
		return false;
	}

	return false;
}

static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_err_cnt_init_per_channel, NULL);
}

static bool umc_v12_0_query_ras_poison_mode(struct amdgpu_device *adev)
{
	/*
	 * Force return true, because regUMCCH0_EccCtrl
	 * is not accessible from host side
	 */
	return true;
}

const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = {
	.query_ras_error_count = umc_v12_0_query_ras_error_count,
	.query_ras_error_address = umc_v12_0_query_ras_error_address,
};

static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				     enum aca_smu_type type, void *data)
{
	struct amdgpu_device *adev = handle->adev;
	struct aca_bank_info info;
	enum aca_error_type err_type;
	u64 status, count;
	u32 ext_error_code;
	int ret;

	status = bank->regs[ACA_REG_IDX_STATUS];
	if (umc_v12_0_is_deferred_error(adev, status))
		err_type = ACA_ERROR_TYPE_DEFERRED;
	else if (umc_v12_0_is_uncorrectable_error(adev, status))
		err_type = ACA_ERROR_TYPE_UE;
	else if (umc_v12_0_is_correctable_error(adev, status))
		err_type = ACA_ERROR_TYPE_CE;
	else
		return 0;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	/* Use the MISC0 error counter only when the extended error code is 0;
	 * otherwise log this bank as a single error.
	 */
	ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
	count = ext_error_code == 0 ?
		ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;

	return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}

static const struct aca_bank_ops umc_v12_0_aca_bank_ops = {
	.aca_bank_parser = umc_v12_0_aca_bank_parser,
};

const struct aca_info umc_v12_0_aca_info = {
	.hwip = ACA_HWIP_TYPE_UMC,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK | ACA_ERROR_DEFERRED_MASK,
	.bank_ops = &umc_v12_0_aca_bank_ops,
};

static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int ret;

	ret = amdgpu_umc_ras_late_init(adev, ras_block);
	if (ret)
		return ret;

	ret = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__UMC,
				  &umc_v12_0_aca_info, NULL);
	if (ret)
		return ret;

	return 0;
}

struct amdgpu_umc_ras umc_v12_0_ras = {
	.ras_block = {
		.hw_ops = &umc_v12_0_ras_hw_ops,
		.ras_late_init = umc_v12_0_ras_late_init,
	},
	.err_cnt_init = umc_v12_0_err_cnt_init,
	.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
	.ecc_info_query_ras_error_count = umc_v12_0_ecc_info_query_ras_error_count,
	.ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address,
	.check_ecc_err_status = umc_v12_0_check_ecc_err_status,
};