/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
21 * 22 */ 23 24 #include <linux/list.h> 25 #include "amdgpu.h" 26 #include "amdgpu_aca.h" 27 #include "amdgpu_ras.h" 28 29 #define ACA_BANK_HWID(type, hwid, mcatype) [ACA_HWIP_TYPE_##type] = {hwid, mcatype} 30 31 typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, void *data); 32 33 struct aca_banks { 34 int nr_banks; 35 struct list_head list; 36 }; 37 38 struct aca_hwip { 39 int hwid; 40 int mcatype; 41 }; 42 43 static struct aca_hwip aca_hwid_mcatypes[ACA_HWIP_TYPE_COUNT] = { 44 ACA_BANK_HWID(SMU, 0x01, 0x01), 45 ACA_BANK_HWID(PCS_XGMI, 0x50, 0x00), 46 ACA_BANK_HWID(UMC, 0x96, 0x00), 47 }; 48 49 static void aca_banks_init(struct aca_banks *banks) 50 { 51 if (!banks) 52 return; 53 54 memset(banks, 0, sizeof(*banks)); 55 INIT_LIST_HEAD(&banks->list); 56 } 57 58 static int aca_banks_add_bank(struct aca_banks *banks, struct aca_bank *bank) 59 { 60 struct aca_bank_node *node; 61 62 if (!bank) 63 return -EINVAL; 64 65 node = kvzalloc(sizeof(*node), GFP_KERNEL); 66 if (!node) 67 return -ENOMEM; 68 69 memcpy(&node->bank, bank, sizeof(*bank)); 70 71 INIT_LIST_HEAD(&node->node); 72 list_add_tail(&node->node, &banks->list); 73 74 banks->nr_banks++; 75 76 return 0; 77 } 78 79 static void aca_banks_release(struct aca_banks *banks) 80 { 81 struct aca_bank_node *node, *tmp; 82 83 list_for_each_entry_safe(node, tmp, &banks->list, node) { 84 list_del(&node->node); 85 kvfree(node); 86 } 87 } 88 89 static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_error_type type, u32 *count) 90 { 91 struct amdgpu_aca *aca = &adev->aca; 92 const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; 93 94 if (!count) 95 return -EINVAL; 96 97 if (!smu_funcs || !smu_funcs->get_valid_aca_count) 98 return -EOPNOTSUPP; 99 100 return smu_funcs->get_valid_aca_count(adev, type, count); 101 } 102 103 static struct aca_regs_dump { 104 const char *name; 105 int reg_idx; 106 } aca_regs[] = { 107 {"CONTROL", ACA_REG_IDX_CTL}, 108 
{"STATUS", ACA_REG_IDX_STATUS}, 109 {"ADDR", ACA_REG_IDX_ADDR}, 110 {"MISC", ACA_REG_IDX_MISC0}, 111 {"CONFIG", ACA_REG_IDX_CONFG}, 112 {"IPID", ACA_REG_IDX_IPID}, 113 {"SYND", ACA_REG_IDX_SYND}, 114 {"DESTAT", ACA_REG_IDX_DESTAT}, 115 {"DEADDR", ACA_REG_IDX_DEADDR}, 116 {"CONTROL_MASK", ACA_REG_IDX_CTL_MASK}, 117 }; 118 119 static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank) 120 { 121 int i; 122 123 dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n"); 124 /* plus 1 for output format, e.g: ACA[08/08]: xxxx */ 125 for (i = 0; i < ARRAY_SIZE(aca_regs); i++) 126 dev_info(adev->dev, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n", 127 idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]); 128 } 129 130 static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_error_type type, 131 int start, int count, 132 struct aca_banks *banks) 133 { 134 struct amdgpu_aca *aca = &adev->aca; 135 const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; 136 struct aca_bank bank; 137 int i, max_count, ret; 138 139 if (!count) 140 return 0; 141 142 if (!smu_funcs || !smu_funcs->get_valid_aca_bank) 143 return -EOPNOTSUPP; 144 145 switch (type) { 146 case ACA_ERROR_TYPE_UE: 147 max_count = smu_funcs->max_ue_bank_count; 148 break; 149 case ACA_ERROR_TYPE_CE: 150 max_count = smu_funcs->max_ce_bank_count; 151 break; 152 case ACA_ERROR_TYPE_DEFERRED: 153 default: 154 return -EINVAL; 155 } 156 157 if (start + count >= max_count) 158 return -EINVAL; 159 160 count = min_t(int, count, max_count); 161 for (i = 0; i < count; i++) { 162 memset(&bank, 0, sizeof(bank)); 163 ret = smu_funcs->get_valid_aca_bank(adev, type, start + i, &bank); 164 if (ret) 165 return ret; 166 167 aca_smu_bank_dump(adev, i, count, &bank); 168 169 ret = aca_banks_add_bank(banks, &bank); 170 if (ret) 171 return ret; 172 } 173 174 return 0; 175 } 176 177 static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type 
type) 178 { 179 180 struct aca_hwip *hwip; 181 int hwid, mcatype; 182 u64 ipid; 183 184 if (!bank || type == ACA_HWIP_TYPE_UNKNOW) 185 return false; 186 187 hwip = &aca_hwid_mcatypes[type]; 188 if (!hwip->hwid) 189 return false; 190 191 ipid = bank->regs[ACA_REG_IDX_IPID]; 192 hwid = ACA_REG__IPID__HARDWAREID(ipid); 193 mcatype = ACA_REG__IPID__MCATYPE(ipid); 194 195 return hwip->hwid == hwid && hwip->mcatype == mcatype; 196 } 197 198 static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type) 199 { 200 const struct aca_bank_ops *bank_ops = handle->bank_ops; 201 202 if (!aca_bank_hwip_is_matched(bank, handle->hwip)) 203 return false; 204 205 if (!bank_ops->aca_bank_is_valid) 206 return true; 207 208 return bank_ops->aca_bank_is_valid(handle, bank, type, handle->data); 209 } 210 211 static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_bank_info *info) 212 { 213 struct aca_bank_error *bank_error; 214 215 bank_error = kvzalloc(sizeof(*bank_error), GFP_KERNEL); 216 if (!bank_error) 217 return NULL; 218 219 INIT_LIST_HEAD(&bank_error->node); 220 memcpy(&bank_error->info, info, sizeof(*info)); 221 222 mutex_lock(&aerr->lock); 223 list_add_tail(&bank_error->node, &aerr->list); 224 mutex_unlock(&aerr->lock); 225 226 return bank_error; 227 } 228 229 static struct aca_bank_error *find_bank_error(struct aca_error *aerr, struct aca_bank_info *info) 230 { 231 struct aca_bank_error *bank_error = NULL; 232 struct aca_bank_info *tmp_info; 233 bool found = false; 234 235 mutex_lock(&aerr->lock); 236 list_for_each_entry(bank_error, &aerr->list, node) { 237 tmp_info = &bank_error->info; 238 if (tmp_info->socket_id == info->socket_id && 239 tmp_info->die_id == info->die_id) { 240 found = true; 241 goto out_unlock; 242 } 243 } 244 245 out_unlock: 246 mutex_unlock(&aerr->lock); 247 248 return found ? 
bank_error : NULL; 249 } 250 251 static void aca_bank_error_remove(struct aca_error *aerr, struct aca_bank_error *bank_error) 252 { 253 if (!aerr || !bank_error) 254 return; 255 256 list_del(&bank_error->node); 257 aerr->nr_errors--; 258 259 kvfree(bank_error); 260 } 261 262 static struct aca_bank_error *get_bank_error(struct aca_error *aerr, struct aca_bank_info *info) 263 { 264 struct aca_bank_error *bank_error; 265 266 if (!aerr || !info) 267 return NULL; 268 269 bank_error = find_bank_error(aerr, info); 270 if (bank_error) 271 return bank_error; 272 273 return new_bank_error(aerr, info); 274 } 275 276 static int aca_log_errors(struct aca_handle *handle, enum aca_error_type type, 277 struct aca_bank_report *report) 278 { 279 struct aca_error_cache *error_cache = &handle->error_cache; 280 struct aca_bank_error *bank_error; 281 struct aca_error *aerr; 282 283 if (!handle || !report) 284 return -EINVAL; 285 286 if (!report->count[type]) 287 return 0; 288 289 aerr = &error_cache->errors[type]; 290 bank_error = get_bank_error(aerr, &report->info); 291 if (!bank_error) 292 return -ENOMEM; 293 294 bank_error->count[type] += report->count[type]; 295 296 return 0; 297 } 298 299 static int aca_generate_bank_report(struct aca_handle *handle, struct aca_bank *bank, 300 enum aca_error_type type, struct aca_bank_report *report) 301 { 302 const struct aca_bank_ops *bank_ops = handle->bank_ops; 303 304 if (!bank || !report) 305 return -EINVAL; 306 307 if (!bank_ops->aca_bank_generate_report) 308 return -EOPNOTSUPP; 309 310 memset(report, 0, sizeof(*report)); 311 return bank_ops->aca_bank_generate_report(handle, bank, type, 312 report, handle->data); 313 } 314 315 static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank *bank, 316 enum aca_error_type type, void *data) 317 { 318 struct aca_bank_report report; 319 int ret; 320 321 ret = aca_generate_bank_report(handle, bank, type, &report); 322 if (ret) 323 return ret; 324 325 if (!report.count[type]) 326 
return 0; 327 328 ret = aca_log_errors(handle, type, &report); 329 if (ret) 330 return ret; 331 332 return 0; 333 } 334 335 static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *bank, 336 enum aca_error_type type, bank_handler_t handler, void *data) 337 { 338 struct aca_handle *handle; 339 int ret; 340 341 if (list_empty(&mgr->list)) 342 return 0; 343 344 list_for_each_entry(handle, &mgr->list, node) { 345 if (!aca_bank_is_valid(handle, bank, type)) 346 continue; 347 348 ret = handler(handle, bank, type, data); 349 if (ret) 350 return ret; 351 } 352 353 return 0; 354 } 355 356 static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks *banks, 357 enum aca_error_type type, bank_handler_t handler, void *data) 358 { 359 struct aca_bank_node *node; 360 struct aca_bank *bank; 361 int ret; 362 363 if (!mgr || !banks) 364 return -EINVAL; 365 366 /* pre check to avoid unnecessary operations */ 367 if (list_empty(&mgr->list) || list_empty(&banks->list)) 368 return 0; 369 370 list_for_each_entry(node, &banks->list, node) { 371 bank = &node->bank; 372 373 ret = aca_dispatch_bank(mgr, bank, type, handler, data); 374 if (ret) 375 return ret; 376 } 377 378 return 0; 379 } 380 381 static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type, 382 bank_handler_t handler, void *data) 383 { 384 struct amdgpu_aca *aca = &adev->aca; 385 struct aca_banks banks; 386 u32 count = 0; 387 int ret; 388 389 if (list_empty(&aca->mgr.list)) 390 return 0; 391 392 /* NOTE: pmfw is only support UE and CE */ 393 if (type == ACA_ERROR_TYPE_DEFERRED) 394 type = ACA_ERROR_TYPE_CE; 395 396 ret = aca_smu_get_valid_aca_count(adev, type, &count); 397 if (ret) 398 return ret; 399 400 if (!count) 401 return 0; 402 403 aca_banks_init(&banks); 404 405 ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks); 406 if (ret) 407 goto err_release_banks; 408 409 if (list_empty(&banks.list)) { 410 ret = 0; 411 goto err_release_banks; 412 } 413 414 
ret = aca_dispatch_banks(&aca->mgr, &banks, type, 415 handler, data); 416 if (ret) 417 goto err_release_banks; 418 419 err_release_banks: 420 aca_banks_release(&banks); 421 422 return ret; 423 } 424 425 static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_error_type type, struct ras_err_data *err_data) 426 { 427 struct aca_bank_info *info; 428 struct amdgpu_smuio_mcm_config_info mcm_info; 429 u64 count; 430 431 if (type >= ACA_ERROR_TYPE_COUNT) 432 return -EINVAL; 433 434 count = bank_error->count[type]; 435 if (!count) 436 return 0; 437 438 info = &bank_error->info; 439 mcm_info.die_id = info->die_id; 440 mcm_info.socket_id = info->socket_id; 441 442 switch (type) { 443 case ACA_ERROR_TYPE_UE: 444 amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, count); 445 break; 446 case ACA_ERROR_TYPE_CE: 447 amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count); 448 break; 449 case ACA_ERROR_TYPE_DEFERRED: 450 default: 451 break; 452 } 453 454 return 0; 455 } 456 457 static int aca_log_aca_error(struct aca_handle *handle, enum aca_error_type type, struct ras_err_data *err_data) 458 { 459 struct aca_error_cache *error_cache = &handle->error_cache; 460 struct aca_error *aerr = &error_cache->errors[type]; 461 struct aca_bank_error *bank_error, *tmp; 462 463 mutex_lock(&aerr->lock); 464 465 if (list_empty(&aerr->list)) 466 goto out_unlock; 467 468 list_for_each_entry_safe(bank_error, tmp, &aerr->list, node) { 469 aca_log_aca_error_data(bank_error, type, err_data); 470 aca_bank_error_remove(aerr, bank_error); 471 } 472 473 out_unlock: 474 mutex_unlock(&aerr->lock); 475 476 return 0; 477 } 478 479 static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, enum aca_error_type type, 480 struct ras_err_data *err_data) 481 { 482 int ret; 483 484 /* udpate aca bank to aca source error_cache first */ 485 ret = aca_banks_update(adev, type, handler_aca_log_bank_error, NULL); 486 if (ret) 487 return ret; 488 
489 return aca_log_aca_error(handle, type, err_data); 490 } 491 492 static bool aca_handle_is_valid(struct aca_handle *handle) 493 { 494 if (!handle->mask || !list_empty(&handle->node)) 495 return false; 496 497 return true; 498 } 499 500 int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, 501 enum aca_error_type type, void *data) 502 { 503 struct ras_err_data *err_data = (struct ras_err_data *)data; 504 505 if (!handle || !err_data) 506 return -EINVAL; 507 508 if (aca_handle_is_valid(handle)) 509 return -EOPNOTSUPP; 510 511 if (!(BIT(type) & handle->mask)) 512 return 0; 513 514 return __aca_get_error_data(adev, handle, type, err_data); 515 } 516 517 static void aca_error_init(struct aca_error *aerr, enum aca_error_type type) 518 { 519 mutex_init(&aerr->lock); 520 INIT_LIST_HEAD(&aerr->list); 521 aerr->type = type; 522 aerr->nr_errors = 0; 523 } 524 525 static void aca_init_error_cache(struct aca_handle *handle) 526 { 527 struct aca_error_cache *error_cache = &handle->error_cache; 528 int type; 529 530 for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++) 531 aca_error_init(&error_cache->errors[type], type); 532 } 533 534 static void aca_error_fini(struct aca_error *aerr) 535 { 536 struct aca_bank_error *bank_error, *tmp; 537 538 mutex_lock(&aerr->lock); 539 list_for_each_entry_safe(bank_error, tmp, &aerr->list, node) 540 aca_bank_error_remove(aerr, bank_error); 541 542 mutex_destroy(&aerr->lock); 543 } 544 545 static void aca_fini_error_cache(struct aca_handle *handle) 546 { 547 struct aca_error_cache *error_cache = &handle->error_cache; 548 int type; 549 550 for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++) 551 aca_error_fini(&error_cache->errors[type]); 552 } 553 554 static int add_aca_handle(struct amdgpu_device *adev, struct aca_handle_manager *mgr, struct aca_handle *handle, 555 const char *name, const struct aca_info *ras_info, void *data) 556 { 557 memset(handle, 0, sizeof(*handle)); 558 559 
handle->adev = adev; 560 handle->mgr = mgr; 561 handle->name = name; 562 handle->hwip = ras_info->hwip; 563 handle->mask = ras_info->mask; 564 handle->bank_ops = ras_info->bank_ops; 565 handle->data = data; 566 aca_init_error_cache(handle); 567 568 INIT_LIST_HEAD(&handle->node); 569 list_add_tail(&handle->node, &mgr->list); 570 mgr->nr_handles++; 571 572 return 0; 573 } 574 575 static ssize_t aca_sysfs_read(struct device *dev, 576 struct device_attribute *attr, char *buf) 577 { 578 struct aca_handle *handle = container_of(attr, struct aca_handle, aca_attr); 579 580 /* NOTE: the aca cache will be auto cleared once read, 581 * So the driver should unify the query entry point, forward request to ras query interface directly */ 582 return amdgpu_ras_aca_sysfs_read(dev, attr, handle, buf, handle->data); 583 } 584 585 static int add_aca_sysfs(struct amdgpu_device *adev, struct aca_handle *handle) 586 { 587 struct device_attribute *aca_attr = &handle->aca_attr; 588 589 snprintf(handle->attr_name, sizeof(handle->attr_name) - 1, "aca_%s", handle->name); 590 aca_attr->show = aca_sysfs_read; 591 aca_attr->attr.name = handle->attr_name; 592 aca_attr->attr.mode = S_IRUGO; 593 sysfs_attr_init(&aca_attr->attr); 594 595 return sysfs_add_file_to_group(&adev->dev->kobj, 596 &aca_attr->attr, 597 "ras"); 598 } 599 600 int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle, 601 const char *name, const struct aca_info *ras_info, void *data) 602 { 603 struct amdgpu_aca *aca = &adev->aca; 604 int ret; 605 606 if (!amdgpu_aca_is_enabled(adev)) 607 return 0; 608 609 ret = add_aca_handle(adev, &aca->mgr, handle, name, ras_info, data); 610 if (ret) 611 return ret; 612 613 return add_aca_sysfs(adev, handle); 614 } 615 616 static void remove_aca_handle(struct aca_handle *handle) 617 { 618 struct aca_handle_manager *mgr = handle->mgr; 619 620 aca_fini_error_cache(handle); 621 list_del(&handle->node); 622 mgr->nr_handles--; 623 } 624 625 static void 
remove_aca_sysfs(struct aca_handle *handle) 626 { 627 struct amdgpu_device *adev = handle->adev; 628 struct device_attribute *aca_attr = &handle->aca_attr; 629 630 if (adev->dev->kobj.sd) 631 sysfs_remove_file_from_group(&adev->dev->kobj, 632 &aca_attr->attr, 633 "ras"); 634 } 635 636 void amdgpu_aca_remove_handle(struct aca_handle *handle) 637 { 638 if (!handle || list_empty(&handle->node)) 639 return; 640 641 remove_aca_sysfs(handle); 642 remove_aca_handle(handle); 643 } 644 645 static int aca_manager_init(struct aca_handle_manager *mgr) 646 { 647 INIT_LIST_HEAD(&mgr->list); 648 mgr->nr_handles = 0; 649 650 return 0; 651 } 652 653 static void aca_manager_fini(struct aca_handle_manager *mgr) 654 { 655 struct aca_handle *handle, *tmp; 656 657 list_for_each_entry_safe(handle, tmp, &mgr->list, node) 658 amdgpu_aca_remove_handle(handle); 659 } 660 661 bool amdgpu_aca_is_enabled(struct amdgpu_device *adev) 662 { 663 return adev->aca.is_enabled; 664 } 665 666 int amdgpu_aca_init(struct amdgpu_device *adev) 667 { 668 struct amdgpu_aca *aca = &adev->aca; 669 int ret; 670 671 ret = aca_manager_init(&aca->mgr); 672 if (ret) 673 return ret; 674 675 return 0; 676 } 677 678 void amdgpu_aca_fini(struct amdgpu_device *adev) 679 { 680 struct amdgpu_aca *aca = &adev->aca; 681 682 aca_manager_fini(&aca->mgr); 683 } 684 685 int amdgpu_aca_reset(struct amdgpu_device *adev) 686 { 687 amdgpu_aca_fini(adev); 688 689 return amdgpu_aca_init(adev); 690 } 691 692 void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs) 693 { 694 struct amdgpu_aca *aca = &adev->aca; 695 696 WARN_ON(aca->smu_funcs); 697 aca->smu_funcs = smu_funcs; 698 } 699 700 int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info) 701 { 702 u64 ipid; 703 u32 instidhi, instidlo; 704 705 if (!bank || !info) 706 return -EINVAL; 707 708 ipid = bank->regs[ACA_REG_IDX_IPID]; 709 info->hwid = ACA_REG__IPID__HARDWAREID(ipid); 710 info->mcatype = 
ACA_REG__IPID__MCATYPE(ipid); 711 /* 712 * Unfied DieID Format: SAASS. A:AID, S:Socket. 713 * Unfied DieID[4:4] = InstanceId[0:0] 714 * Unfied DieID[0:3] = InstanceIdHi[0:3] 715 */ 716 instidhi = ACA_REG__IPID__INSTANCEIDHI(ipid); 717 instidlo = ACA_REG__IPID__INSTANCEIDLO(ipid); 718 info->die_id = ((instidhi >> 2) & 0x03); 719 info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03); 720 721 return 0; 722 } 723 724 static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank) 725 { 726 int error_code; 727 728 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 729 case IP_VERSION(13, 0, 6): 730 if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) { 731 error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]); 732 return error_code & 0xff; 733 } 734 break; 735 default: 736 break; 737 } 738 739 /* NOTE: the true error code is encoded in status.errorcode[0:7] */ 740 error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]); 741 742 return error_code & 0xff; 743 } 744 745 int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size) 746 { 747 int i, error_code; 748 749 if (!bank || !err_codes) 750 return -EINVAL; 751 752 error_code = aca_bank_get_error_code(adev, bank); 753 for (i = 0; i < size; i++) { 754 if (err_codes[i] == error_code) 755 return 0; 756 } 757 758 return -EINVAL; 759 } 760 761 int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en) 762 { 763 struct amdgpu_aca *aca = &adev->aca; 764 const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; 765 766 if (!smu_funcs || !smu_funcs->set_debug_mode) 767 return -EOPNOTSUPP; 768 769 return smu_funcs->set_debug_mode(adev, en); 770 } 771 772 #if defined(CONFIG_DEBUG_FS) 773 static int amdgpu_aca_smu_debug_mode_set(void *data, u64 val) 774 { 775 struct amdgpu_device *adev = (struct amdgpu_device *)data; 776 int ret; 777 778 ret = amdgpu_ras_set_aca_debug_mode(adev, val ? 
true : false); 779 if (ret) 780 return ret; 781 782 dev_info(adev->dev, "amdgpu set smu aca debug mode %s success\n", val ? "on" : "off"); 783 784 return 0; 785 } 786 787 static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_error_type type, int idx) 788 { 789 struct aca_bank_info info; 790 int i, ret; 791 792 ret = aca_bank_info_decode(bank, &info); 793 if (ret) 794 return; 795 796 seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_ERROR_TYPE_UE ? "UE" : "CE"); 797 seq_printf(m, "aca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n", 798 idx, info.socket_id, info.die_id, info.hwid, info.mcatype); 799 800 for (i = 0; i < ARRAY_SIZE(aca_regs); i++) 801 seq_printf(m, "aca entry[%d].regs[%d]: 0x%016llx\n", idx, aca_regs[i].reg_idx, bank->regs[aca_regs[i].reg_idx]); 802 } 803 804 struct aca_dump_context { 805 struct seq_file *m; 806 int idx; 807 }; 808 809 static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *bank, 810 enum aca_error_type type, void *data) 811 { 812 struct aca_dump_context *ctx = (struct aca_dump_context *)data; 813 814 aca_dump_entry(ctx->m, bank, type, ctx->idx++); 815 816 return handler_aca_log_bank_error(handle, bank, type, NULL); 817 } 818 819 static int aca_dump_show(struct seq_file *m, enum aca_error_type type) 820 { 821 struct amdgpu_device *adev = (struct amdgpu_device *)m->private; 822 struct aca_dump_context context = { 823 .m = m, 824 .idx = 0, 825 }; 826 827 return aca_banks_update(adev, type, handler_aca_bank_dump, (void *)&context); 828 } 829 830 static int aca_dump_ce_show(struct seq_file *m, void *unused) 831 { 832 return aca_dump_show(m, ACA_ERROR_TYPE_CE); 833 } 834 835 static int aca_dump_ce_open(struct inode *inode, struct file *file) 836 { 837 return single_open(file, aca_dump_ce_show, inode->i_private); 838 } 839 840 static const struct file_operations aca_ce_dump_debug_fops = { 841 .owner = THIS_MODULE, 842 .open = aca_dump_ce_open, 843 .read = seq_read, 844 
.llseek = seq_lseek, 845 .release = single_release, 846 }; 847 848 static int aca_dump_ue_show(struct seq_file *m, void *unused) 849 { 850 return aca_dump_show(m, ACA_ERROR_TYPE_UE); 851 } 852 853 static int aca_dump_ue_open(struct inode *inode, struct file *file) 854 { 855 return single_open(file, aca_dump_ue_show, inode->i_private); 856 } 857 858 static const struct file_operations aca_ue_dump_debug_fops = { 859 .owner = THIS_MODULE, 860 .open = aca_dump_ue_open, 861 .read = seq_read, 862 .llseek = seq_lseek, 863 .release = single_release, 864 }; 865 866 DEFINE_DEBUGFS_ATTRIBUTE(aca_debug_mode_fops, NULL, amdgpu_aca_smu_debug_mode_set, "%llu\n"); 867 #endif 868 869 void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root) 870 { 871 #if defined(CONFIG_DEBUG_FS) 872 if (!root || adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6)) 873 return; 874 875 debugfs_create_file("aca_debug_mode", 0200, root, adev, &aca_debug_mode_fops); 876 debugfs_create_file("aca_ue_dump", 0400, root, adev, &aca_ue_dump_debug_fops); 877 debugfs_create_file("aca_ce_dump", 0400, root, adev, &aca_ce_dump_debug_fops); 878 #endif 879 } 880