1 /* 2 * Copyright 2021 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include "amdgpu_ras.h" 24 #include "amdgpu.h" 25 #include "amdgpu_mca.h" 26 27 #include "umc/umc_6_7_0_offset.h" 28 #include "umc/umc_6_7_0_sh_mask.h" 29 30 void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev, 31 uint64_t mc_status_addr, 32 unsigned long *error_count) 33 { 34 uint64_t mc_status = RREG64_PCIE(mc_status_addr); 35 36 if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && 37 REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) 38 *error_count += 1; 39 } 40 41 void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev, 42 uint64_t mc_status_addr, 43 unsigned long *error_count) 44 { 45 uint64_t mc_status = RREG64_PCIE(mc_status_addr); 46 47 if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && 48 (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || 49 REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || 50 REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || 51 REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || 52 REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) 53 *error_count += 1; 54 } 55 56 void amdgpu_mca_reset_error_count(struct amdgpu_device *adev, 57 uint64_t mc_status_addr) 58 { 59 WREG64_PCIE(mc_status_addr, 0x0ULL); 60 } 61 62 void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev, 63 uint64_t mc_status_addr, 64 void *ras_error_status) 65 { 66 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; 67 68 amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count)); 69 amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count)); 70 71 amdgpu_mca_reset_error_count(adev, mc_status_addr); 72 } 73 74 int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev) 75 { 76 int err; 77 struct amdgpu_mca_ras_block *ras; 78 79 if (!adev->mca.mp0.ras) 80 return 0; 81 82 ras = adev->mca.mp0.ras; 83 84 err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); 85 if (err) { 86 dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n"); 87 return err; 88 } 89 90 strcpy(ras->ras_block.ras_comm.name, "mca.mp0"); 91 ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA; 92 ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; 93 adev->mca.mp0.ras_if = &ras->ras_block.ras_comm; 94 95 return 0; 96 } 97 98 int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev) 99 { 100 int err; 101 struct amdgpu_mca_ras_block *ras; 102 103 if (!adev->mca.mp1.ras) 104 return 0; 105 106 ras = adev->mca.mp1.ras; 107 108 err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); 109 if (err) { 110 dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n"); 111 return err; 112 } 113 114 strcpy(ras->ras_block.ras_comm.name, "mca.mp1"); 115 ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA; 116 ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; 117 adev->mca.mp1.ras_if = &ras->ras_block.ras_comm; 118 119 return 0; 120 } 121 122 int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev) 123 { 124 int err; 125 struct amdgpu_mca_ras_block *ras; 126 127 if (!adev->mca.mpio.ras) 128 return 0; 129 130 ras = adev->mca.mpio.ras; 131 132 err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); 133 if (err) { 134 dev_err(adev->dev, "Failed to register mca.mpio ras block!\n"); 135 return err; 136 } 137 138 strcpy(ras->ras_block.ras_comm.name, "mca.mpio"); 139 ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA; 140 ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; 141 adev->mca.mpio.ras_if = &ras->ras_block.ras_comm; 142 143 return 0; 144 } 145 146 void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set) 147 { 148 if (!mca_set) 149 return; 150 151 memset(mca_set, 0, sizeof(*mca_set)); 152 INIT_LIST_HEAD(&mca_set->list); 153 } 154 155 int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry) 156 { 157 struct mca_bank_node *node; 158 159 if (!entry) 160 return -EINVAL; 161 162 node = kvzalloc(sizeof(*node), GFP_KERNEL); 163 if (!node) 164 return -ENOMEM; 165 166 memcpy(&node->entry, entry, sizeof(*entry)); 167 168 INIT_LIST_HEAD(&node->node); 169 list_add_tail(&node->node, &mca_set->list); 170 171 mca_set->nr_entries++; 172 173 return 0; 174 } 175 176 void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set) 177 { 178 struct mca_bank_node *node, *tmp; 179 180 list_for_each_entry_safe(node, tmp, &mca_set->list, node) { 181 list_del(&node->node); 182 kvfree(node); 183 } 184 } 185 186 void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs) 187 { 188 struct amdgpu_mca *mca = &adev->mca; 189 190 mca->mca_funcs = mca_funcs; 191 } 192 193 int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) 194 { 195 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; 196 197 if (mca_funcs && mca_funcs->mca_set_debug_mode) 198 return mca_funcs->mca_set_debug_mode(adev, enable); 199 200 return -EOPNOTSUPP; 201 } 202 203 static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry) 204 { 205 dev_info(adev->dev, "[Hardware error] Accelerator Check Architecture events logged\n"); 206 dev_info(adev->dev, "[Hardware error] aca entry[%02d].STATUS=0x%016llx\n", 207 idx, entry->regs[MCA_REG_IDX_STATUS]); 208 dev_info(adev->dev, "[Hardware error] aca entry[%02d].ADDR=0x%016llx\n", 209 idx, entry->regs[MCA_REG_IDX_ADDR]); 210 dev_info(adev->dev, "[Hardware error] aca entry[%02d].MISC0=0x%016llx\n", 211 idx, entry->regs[MCA_REG_IDX_MISC0]); 212 dev_info(adev->dev, "[Hardware error] aca entry[%02d].IPID=0x%016llx\n", 213 idx, entry->regs[MCA_REG_IDX_IPID]); 214 dev_info(adev->dev, "[Hardware error] aca entry[%02d].SYND=0x%016llx\n", 215 idx, entry->regs[MCA_REG_IDX_SYND]); 216 } 217 218 int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data) 219 { 220 struct amdgpu_smuio_mcm_config_info mcm_info; 221 struct mca_bank_set mca_set; 222 struct mca_bank_node *node; 223 struct mca_bank_entry *entry; 224 uint32_t count; 225 int ret, i = 0; 226 227 amdgpu_mca_bank_set_init(&mca_set); 228 229 ret = amdgpu_mca_smu_get_mca_set(adev, blk, type, &mca_set); 230 if (ret) 231 goto out_mca_release; 232 233 list_for_each_entry(node, &mca_set.list, node) { 234 entry = &node->entry; 235 236 amdgpu_mca_smu_mca_bank_dump(adev, i++, entry); 237 238 count = 0; 239 ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count); 240 if (ret) 241 goto out_mca_release; 242 243 if (!count) 244 continue; 245 246 mcm_info.socket_id = entry->info.socket_id; 247 mcm_info.die_id = entry->info.aid; 248 249 if (type == AMDGPU_MCA_ERROR_TYPE_UE) 250 amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, (uint64_t)count); 251 else 252 amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, (uint64_t)count); 253 } 254 255 out_mca_release: 256 amdgpu_mca_bank_set_release(&mca_set); 257 258 return ret; 259 } 260 261 262 int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count) 263 { 264 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; 265 266 if (!count) 267 return -EINVAL; 268 269 if (mca_funcs && mca_funcs->mca_get_valid_mca_count) 270 return mca_funcs->mca_get_valid_mca_count(adev, type, count); 271 272 return -EOPNOTSUPP; 273 } 274 275 int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, 276 enum amdgpu_mca_error_type type, uint32_t *total) 277 { 278 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; 279 struct mca_bank_set mca_set; 280 struct mca_bank_node *node; 281 struct mca_bank_entry *entry; 282 uint32_t count; 283 int ret; 284 285 if (!total) 286 return -EINVAL; 287 288 if (!mca_funcs) 289 return -EOPNOTSUPP; 290 291 if (!mca_funcs->mca_get_ras_mca_set || !mca_funcs->mca_get_valid_mca_count) 292 return -EOPNOTSUPP; 293 294 amdgpu_mca_bank_set_init(&mca_set); 295 296 ret = mca_funcs->mca_get_ras_mca_set(adev, blk, type, &mca_set); 297 if (ret) 298 goto err_mca_set_release; 299 300 *total = 0; 301 list_for_each_entry(node, &mca_set.list, node) { 302 entry = &node->entry; 303 304 count = 0; 305 ret = mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, &count); 306 if (ret) 307 goto err_mca_set_release; 308 309 *total += count; 310 } 311 312 err_mca_set_release: 313 amdgpu_mca_bank_set_release(&mca_set); 314 315 return ret; 316 } 317 318 int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, 319 enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) 320 { 321 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; 322 if (!count || !entry) 323 return -EINVAL; 324 325 if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count) 326 return -EOPNOTSUPP; 327 328 329 return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count); 330 } 331 332 int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, 333 enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) 334 { 335 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; 336 337 if (!mca_set) 338 return -EINVAL; 339 340 if (!mca_funcs || !mca_funcs->mca_get_ras_mca_set) 341 return -EOPNOTSUPP; 342 343 WARN_ON(!list_empty(&mca_set->list)); 344 345 return mca_funcs->mca_get_ras_mca_set(adev, blk, type, mca_set); 346 } 347 348 int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, 349 int idx, struct mca_bank_entry *entry) 350 { 351 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; 352 int count; 353 354 switch (type) { 355 case AMDGPU_MCA_ERROR_TYPE_UE: 356 count = mca_funcs->max_ue_count; 357 break; 358 case AMDGPU_MCA_ERROR_TYPE_CE: 359 count = mca_funcs->max_ce_count; 360 break; 361 default: 362 return -EINVAL; 363 } 364 365 if (idx >= count) 366 return -EINVAL; 367 368 if (mca_funcs && mca_funcs->mca_get_mca_entry) 369 return mca_funcs->mca_get_mca_entry(adev, type, idx, entry); 370 371 return -EOPNOTSUPP; 372 } 373 374 #if defined(CONFIG_DEBUG_FS) 375 static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val) 376 { 377 struct amdgpu_device *adev = (struct amdgpu_device *)data; 378 int ret; 379 380 ret = amdgpu_mca_smu_set_debug_mode(adev, val ? true : false); 381 if (ret) 382 return ret; 383 384 dev_info(adev->dev, "amdgpu set smu mca debug mode %s success\n", val ? "on" : "off"); 385 386 return 0; 387 } 388 389 static void mca_dump_entry(struct seq_file *m, struct mca_bank_entry *entry) 390 { 391 int i, idx = entry->idx; 392 int reg_idx_array[] = { 393 MCA_REG_IDX_STATUS, 394 MCA_REG_IDX_ADDR, 395 MCA_REG_IDX_MISC0, 396 MCA_REG_IDX_IPID, 397 MCA_REG_IDX_SYND, 398 }; 399 400 seq_printf(m, "mca entry[%d].type: %s\n", idx, entry->type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE"); 401 seq_printf(m, "mca entry[%d].ip: %d\n", idx, entry->ip); 402 seq_printf(m, "mca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n", 403 idx, entry->info.socket_id, entry->info.aid, entry->info.hwid, entry->info.mcatype); 404 405 for (i = 0; i < ARRAY_SIZE(reg_idx_array); i++) 406 seq_printf(m, "mca entry[%d].regs[%d]: 0x%016llx\n", idx, reg_idx_array[i], entry->regs[reg_idx_array[i]]); 407 } 408 409 static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type) 410 { 411 struct amdgpu_device *adev = (struct amdgpu_device *)m->private; 412 struct mca_bank_entry *entry; 413 uint32_t count = 0; 414 int i, ret; 415 416 ret = amdgpu_mca_smu_get_valid_mca_count(adev, type, &count); 417 if (ret) 418 return ret; 419 420 seq_printf(m, "amdgpu smu %s valid mca count: %d\n", 421 type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", count); 422 423 if (!count) 424 return 0; 425 426 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 427 if (!entry) 428 return -ENOMEM; 429 430 for (i = 0; i < count; i++) { 431 memset(entry, 0, sizeof(*entry)); 432 433 ret = amdgpu_mca_smu_get_mca_entry(adev, type, i, entry); 434 if (ret) 435 goto err_free_entry; 436 437 mca_dump_entry(m, entry); 438 } 439 440 err_free_entry: 441 kfree(entry); 442 443 return ret; 444 } 445 446 static int mca_dump_ce_show(struct seq_file *m, void *unused) 447 { 448 return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_CE); 449 } 450 451 static int mca_dump_ce_open(struct inode *inode, struct file *file) 452 { 453 return single_open(file, mca_dump_ce_show, inode->i_private); 454 } 455 456 static const struct file_operations mca_ce_dump_debug_fops = { 457 .owner = THIS_MODULE, 458 .open = mca_dump_ce_open, 459 .read = seq_read, 460 .llseek = seq_lseek, 461 .release = single_release, 462 }; 463 464 static int mca_dump_ue_show(struct seq_file *m, void *unused) 465 { 466 return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_UE); 467 } 468 469 static int mca_dump_ue_open(struct inode *inode, struct file *file) 470 { 471 return single_open(file, mca_dump_ue_show, inode->i_private); 472 } 473 474 static const struct file_operations mca_ue_dump_debug_fops = { 475 .owner = THIS_MODULE, 476 .open = mca_dump_ue_open, 477 .read = seq_read, 478 .llseek = seq_lseek, 479 .release = single_release, 480 }; 481 482 DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_set, "%llu\n"); 483 #endif 484 485 void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root) 486 { 487 #if defined(CONFIG_DEBUG_FS) 488 if (!root || adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6)) 489 return; 490 491 debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops); 492 debugfs_create_file("mca_ue_dump", 0400, root, adev, &mca_ue_dump_debug_fops); 493 debugfs_create_file("mca_ce_dump", 0400, root, adev, &mca_ce_dump_debug_fops); 494 #endif 495 } 496 497