/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "atom.h"
#include "amdgpu_reset.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
};

const char *ras_mca_block_string[] = {
	"mca_mp0",
	"mca_mp1",
	"mca_mpio",
	"mca_iohc",
};

struct amdgpu_ras_block_list {
	/* ras block link */
	struct list_head node;

	struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (!ras_block)
		return "NULL";

	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? \
		ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER	(100 * 1024 * 1024ULL)

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
	int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data = {0, 0, 0, NULL};
	struct eeprom_table_record err_rec;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
			 "RAS WARN: input address 0x%llx is invalid.\n",
			 address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
	err_data.err_addr = &err_rec;
	amdgpu_umc_fill_error_record(&err_data, address,
			(address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "		echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
	    obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);

	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);


	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_string[i]) == 0)
			return 0;
	}
	return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue and ce errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members, they are block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents. say, GFX, sDMA.
 * name: the name of the IP.
 *
 * inject has two more members than head, they are address, value.
 * As their names indicate, inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "inject <block> <error> <sub-block> <address> <value>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card which you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *	see ras_block_string[] for details
 *
 * The error type is one of: ue, ce, where,
 *	ue is multi-uncorrectable
 *	ce is single-correctable
 *
 * The sub-block is the sub-block index, pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
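 *
 * A user-space sketch of the programmatic path (illustrative only; it
 * assumes struct ras_debug_if and the block/error enums have been
 * copied from amdgpu_ras.h into the program, as described above):
 *
 * .. code-block:: c
 *
 *	// enable RAS on the UMC block, i.e. op 1 from the list above
 *	struct ras_debug_if data = {
 *		.head = {
 *			.block = AMDGPU_RAS_BLOCK__UMC,
 *			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *		},
 *		.op = 1,
 *	};
 *	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *
 *	// the whole struct must be written in a single write()
 *	if (fd < 0 || write(fd, &data, sizeof(data)) != sizeof(data))
 *		perror("ras_ctrl");
 *	close(fd);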
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
					     const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev,
			 "RAS WARN: error injection currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev,
				 "RAS WARN: input address 0x%llx is invalid.",
				 data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev,
				 "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
				 data.inject.address);
			break;
		}

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in vram. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
					       const char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM.
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *	ue: 0
 *	ce: 1
 *
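 * A user-space sketch of reading one of these nodes (illustrative
 * only; the card index and block name are placeholders):
 *
 * .. code-block:: c
 *
 *	char buf[128];
 *	int fd = open("/sys/class/drm/card0/device/ras/umc_err_count",
 *		      O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *
 *	if (n > 0) {
 *		buf[n] = '\0';
 *		printf("%s", buf);	// "ue: <count>\nce: <count>\n"
 *	}
 *	close(fd);
 *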
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
	    obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0))
		list_del(&obj->node);
	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return NULL;

		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
	} else
		obj = &con->objs[head->block];

	/* already exist. return obj? */
	if (alive_obj(obj))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
				return NULL;

			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
		} else
			obj = &con->objs[head->block];

		if (alive_obj(obj))
			return obj;
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj))
				return obj;
		}
	}

	return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware support ras, we can create the obj.
	 * Ras framework checks con->hw_supported to see if it needs to do
	 * corresponding initialization.
	 * IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

static int amdgpu_ras_check_feature_allowed(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	if (amdgpu_ras_is_feature_allowed(adev, head) ||
	    amdgpu_ras_is_poison_mode_supported(adev))
		return 1;
	else
		return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret = 0;

	if (!con)
		return -EINVAL;

	if (head->block == AMDGPU_RAS_BLOCK__GFX) {
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}
	}

	/* Do not enable if it is not allowed. */
	if (enable && !amdgpu_ras_check_feature_allowed(adev, head))
		goto out;

	/* Only enable ras feature operation handle on host side */
	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_sriov_vf(adev) &&
	    !amdgpu_ras_intr_triggered()) {
		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable":"disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			goto out;
		}
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);
out:
	if (head->block == AMDGPU_RAS_BLOCK__GFX)
		kfree(info);
	return ret;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm to issue a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing
			 * But sometimes it requests driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With old ras TA, we might fail to enable ras.
			 * Log it and just setup the object.
			 * TODO: remove this WA in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						 "RAS INFO: %s setup object\n",
						 get_ras_block_str(head));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd. */
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* gfx block ras disable cmd must be sent to ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);

			/* clean gfx block ras features flag */
			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features &= ~BIT(head->block);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp.
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int i;
	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};

		if (i == AMDGPU_RAS_BLOCK__MCA)
			continue;

		if (bypass) {
			/*
			 * bypass psp. vbios enable ras for us.
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.type = default_ras_type,
			.sub_block_index = i,
		};

		if (bypass) {
			/*
			 * bypass psp. vbios enable ras for us.
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
		enum amdgpu_ras_block block)
{
	if (!block_obj)
		return -EINVAL;

	if (block_obj->ras_comm.block == block)
		return 0;

	return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
					enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;

	if (block >= AMDGPU_RAS_BLOCK__LAST)
		return NULL;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_block_match) {
			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
				return obj;
		} else {
			if (amdgpu_ras_block_match_default(obj, block) == 0)
				return obj;
		}
	}

	return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/*
	 * choose the right query method according to
	 * whether the smu supports querying error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
	} else if (!ret) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address)
			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
	}
}

/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
				  struct ras_query_if *info)
{
	struct amdgpu_ras_block_object *block_obj = NULL;
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data = {0, 0, 0, NULL};

	if (!obj)
		return -EINVAL;

	if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
		amdgpu_ras_get_ecc_info(adev, &err_data);
	} else {
		block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
		if (!block_obj || !block_obj->hw_ops) {
			dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
				     get_ras_block_str(&info->head));
			return -EINVAL;
		}

		if (block_obj->hw_ops->query_ras_error_count)
			block_obj->hw_ops->query_ras_error_count(adev, &err_data);

		if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
		    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
		    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
			if (block_obj->hw_ops->query_ras_error_status)
				block_obj->hw_ops->query_ras_error_status(adev);
		}
	}

	obj->err_data.ue_count += err_data.ue_count;
	obj->err_data.ce_count += err_data.ce_count;

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;

	if (err_data.ce_count) {
		if (adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id &&
		    adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld correctable hardware errors "
				 "detected in %s block, no user "
				 "action is needed.\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 obj->err_data.ce_count,
				 get_ras_block_str(&info->head));
		} else {
			dev_info(adev->dev, "%ld correctable hardware errors "
				 "detected in %s block, no user "
				 "action is needed.\n",
				 obj->err_data.ce_count,
				 get_ras_block_str(&info->head));
		}
	}
	if (err_data.ue_count) {
		if (adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id &&
		    adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 obj->err_data.ue_count,
				 get_ras_block_str(&info->head));
		} else {
			dev_info(adev->dev, "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 obj->err_data.ue_count,
				 get_ras_block_str(&info->head));
		}
	}

	return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

	if (!amdgpu_ras_is_supported(adev, block))
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     ras_block_str(block));
		return -EINVAL;
	}

	if (block_obj->hw_ops->reset_ras_error_count)
		block_obj->hw_ops->reset_ras_error_count(adev);

	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
		if (block_obj->hw_ops->reset_ras_error_status)
			block_obj->hw_ops->reset_ras_error_status(adev);
	}

	return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = -EINVAL;
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
							info->head.block,
							info->head.sub_block_index);

	/* inject on guest isn't allowed, return success directly */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!obj)
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return -EINVAL;
	}

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	if (info->head.block == AMDGPU_RAS_BLOCK__GFX) {
		if (block_obj->hw_ops->ras_error_inject)
			ret = block_obj->hw_ops->ras_error_inject(adev, info);
	} else {
		/* If a special ras_error_inject is defined (e.g. xgmi), use it */
		if (block_obj->hw_ops->ras_error_inject)
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info);
		else /* if .ras_error_inject is not defined, use the default path */
			ret = psp_ras_trigger_error(&adev->psp, &block_info);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);

	return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 for query success or do nothing, otherwise return an error
 * on failures
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	if (!query_info)
		/* do nothing if query_info is not specified */
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	*ce_count += query_info->ce_count;
	*ue_count += query_info->ue_count;

	/* some hardware/IP supports read to clear,
	 * no need to explicitly reset the err status after the query call */
	if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
	    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
	}

	return 0;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for
 * specific ip block; if info is NULL, then the query request is for
 * all the ip blocks that support query ras error counters/status
 *
 * If set, @ce_count or @ue_count, count and return the corresponding
 * error counts in those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
				 unsigned long *ce_count,
				 unsigned long *ue_count,
				 struct ras_query_if *query_info)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	unsigned long ce, ue;
	int ret;

	if (!adev->ras_enabled || !con)
		return -EOPNOTSUPP;

	/* Don't count since no reporting.
	 */
	if (!ce_count && !ue_count)
		return 0;

	ce = 0;
	ue = 0;
	if (!query_info) {
		/* query all the ip blocks that support ras query interface */
		list_for_each_entry(obj, &con->head, node) {
			struct ras_query_if info = {
				.head = obj->head,
			};

			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
		}
	} else {
		/* query specific ip block */
		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
	}

	if (ret)
		return ret;

	if (ce_count)
		*ce_count = ce;

	if (ue_count)
		*ue_count = ue;

	return 0;
}
/* query/inject/cure end */


/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and not able to be used.
 *
 * P: pending for reserve, this gpu page is marked as bad, and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve. this gpu page can't be reserved for some reason.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
 *
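 * Each line can be parsed in user space with sscanf(); a small
 * illustrative sketch, where "line" stands for one line read from
 * the file:
 *
 * .. code-block:: c
 *
 *	unsigned int pfn, size;
 *	char flag;
 *
 *	// e.g. "0x00000001 : 0x00001000 : R"
 *	if (sscanf(line, "0x%x : 0x%x : %c", &pfn, &size, &flag) == 3)
 *		printf("bad page pfn 0x%x, size 0x%x, state %c\n",
 *		       pfn, size, flag);
 *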
 */

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
			       "0x%08x : 0x%08x : %1s\n",
			       bps[start].bp,
			       bps[start].size,
			       amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}

static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	sysfs_remove_file_from_group(&adev->dev->kobj,
				&con->badpages_attr.attr,
				RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
		 "%s_err_count", head->name);

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME)) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	sysfs_remove_file_from_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME);
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp,
				 &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_remove_bad_page_node(adev);

	amdgpu_ras_sysfs_remove_feature_node(adev);

	return 0;
}
/* sysfs end */

/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover. However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically
 * in that event.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo true > .../ras/auto_reboot
 *
 */
/* debugfs begin */
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *dir;

	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_ops);
	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
			   &con->bad_page_cnt_threshold);
	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_size_ops);
	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
						       S_IRUGO, dir, adev,
						       &amdgpu_ras_debugfs_eeprom_table_ops);
	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);

	/*
	 * After one uncorrectable error happens, usually GPU recovery will
	 * be scheduled. But due to a known problem of GPU recovery failing
	 * to bring the GPU back, the interface below provides a direct way
	 * for the user to have the system rebooted automatically when an
	 * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
	 * GPU recovery routine will never be called.
	 */
	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);

	/*
	 * User could set this not to clean up hardware's error count register
	 * of RAS IPs during ras recovery.
	 */
	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
			    &con->disable_ras_err_cnt_harvest);
	return dir;
}

static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
				      struct ras_fs_if *head,
				      struct dentry *dir)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || !dir)
		return;

	get_obj(obj);

	memcpy(obj->fs_data.debugfs_name,
			head->debugfs_name,
			sizeof(obj->fs_data.debugfs_name));

	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
			    obj, &amdgpu_ras_debugfs_ops);
}

void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct dentry *dir;
	struct ras_manager *obj;
	struct ras_fs_if fs_info;

	/*
	 * it won't be called in resume path, no need to check
	 * suspend and gpu reset status
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
		return;

	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);

	list_for_each_entry(obj, &con->head, node) {
		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
		    (obj->attr_inuse == 1)) {
			sprintf(fs_info.debugfs_name, "%s_err_inject",
				get_ras_block_str(&obj->head));
			fs_info.head = obj->head;
			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
		}
	}
}

/* debugfs end */

/* ras fs */
static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
		amdgpu_ras_sysfs_badpages_read, NULL, 0);
static DEVICE_ATTR(features, S_IRUGO,
		   amdgpu_ras_sysfs_features_read, NULL);
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute_group group = {
		.name = RAS_FS_NAME,
	};
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct bin_attribute *bin_attrs[] = {
		NULL,
		NULL,
	};
	int r;

	/* add features entry */
	con->features_attr = dev_attr_features;
	group.attrs = attrs;
	sysfs_attr_init(attrs[0]);

	if (amdgpu_bad_page_threshold != 0) {
		/* add bad_page_features entry */
		bin_attr_gpu_vram_bad_pages.private = NULL;
		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
		bin_attrs[0] = &con->badpages_attr;
		group.bin_attrs = bin_attrs;
		sysfs_bin_attr_init(bin_attrs[0]);
	}

	r = sysfs_create_group(&adev->dev->kobj, &group);
	if (r)
		dev_err(adev->dev, "Failed to create RAS sysfs group!");

	return 0;
}

static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *con_obj, *ip_obj, *tmp;

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
			if (ip_obj)
				put_obj(ip_obj);
		}
	}

	amdgpu_ras_sysfs_remove_all(adev);
	return 0;
}
/* ras fs end */

/* ih begin */

/* For the hardware that cannot enable the bif ring for both the
 * ras_controller_irq and ras_err_event_athub_irq ih cookies, the driver has
 * to poll the status register to check whether the interrupt is triggered
 * or not, and properly ack the interrupt if it is there
 */
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
	/* Fatal error events are handled on host side */
	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
}

static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	bool poison_stat = false;
	struct amdgpu_device *adev = obj->adev;
	struct amdgpu_ras_block_object *block_obj =
		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);

	if (!block_obj)
		return;

	/* both query_poison_status and handle_poison_consumption are optional,
	 * but at least one of them should be implemented if we need the poison
	 * consumption handler
	 */
	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
		poison_stat = block_obj->hw_ops->query_poison_status(adev);
		if (!poison_stat) {
			/* Not a poison consumption interrupt, no need to handle it */
			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
				 block_obj->ras_comm.name);

			return;
		}
	}

	if (!adev->gmc.xgmi.connected_to_cpu)
		amdgpu_umc_poison_handler(adev, false);

	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);

	/* gpu reset is fallback for failed and default cases */
	if (poison_stat) {
		dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
			 block_obj->ras_comm.name);
		amdgpu_ras_reset_gpu(adev);
	} else {
		amdgpu_gfx_poison_consumption_handler(adev, entry);
	}
}

static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	dev_info(obj->adev->dev,
		 "Poison is created, no user action is needed.\n");
}

static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct ras_err_data err_data = {0, 0, 0, NULL};
	int ret;

	if (!data->cb)
		return;

	/* Let the IP handle its data; we may need to get the output
	 * from the callback to update the error type/count, etc.
	 */
	ret = data->cb(obj->adev, &err_data, entry);
	/* ue will trigger an interrupt, and in that case
	 * we need to do a reset to recover the whole system.
	 * But leave that recovery to the IP; here we just dispatch
	 * the error.
	 */
	if (ret == AMDGPU_RAS_SUCCESS) {
		/* these counts could be left as 0 if
		 * some blocks do not count error number
		 */
		obj->err_data.ue_count += err_data.ue_count;
		obj->err_data.ce_count += err_data.ce_count;
	}
}

static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct amdgpu_iv_entry entry;

	while (data->rptr != data->wptr) {
		rmb();
		memcpy(&entry, &data->ring[data->rptr],
				data->element_size);

		wmb();
		data->rptr = (data->aligned_element_size +
				data->rptr) % data->ring_size;

		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
			else
				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
		} else {
			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
				amdgpu_ras_interrupt_umc_handler(obj, &entry);
			else
				dev_warn(obj->adev->dev,
					 "No RAS interrupt handler for non-UMC block with poison disabled.\n");
		}
	}
}

static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
{
	struct ras_ih_data *data =
		container_of(work, struct ras_ih_data, ih_work);
	struct ras_manager *obj =
		container_of(data, struct ras_manager, ih_data);

	amdgpu_ras_interrupt_handler(obj);
}

int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
		struct ras_dispatch_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data = &obj->ih_data;

	if (!obj)
		return -EINVAL;

	if (data->inuse == 0)
		return 0;

	/* Might be overflow... */
	memcpy(&data->ring[data->wptr], info->entry,
			data->element_size);

	wmb();
	data->wptr = (data->aligned_element_size +
			data->wptr) % data->ring_size;

	schedule_work(&data->ih_work);

	return 0;
}

int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
	struct ras_ih_data *data;

	if (!obj)
		return -EINVAL;

	data = &obj->ih_data;
	if (data->inuse == 0)
		return 0;

	cancel_work_sync(&data->ih_work);

	kfree(data->ring);
	memset(data, 0, sizeof(*data));
	put_obj(obj);

	return 0;
}

int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
	struct ras_ih_data *data;
	struct amdgpu_ras_block_object *ras_obj;

	if (!obj) {
		/* in case we register the IH before enabling the ras feature */
		obj = amdgpu_ras_create_obj(adev, head);
		if (!obj)
			return -EINVAL;
	} else
		get_obj(obj);

	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);

	data = &obj->ih_data;
	/* add the callback, etc. */
	*data = (struct ras_ih_data) {
		.inuse = 0,
		.cb = ras_obj->ras_cb,
		.element_size = sizeof(struct amdgpu_iv_entry),
		.rptr = 0,
		.wptr = 0,
	};

	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);

	data->aligned_element_size = ALIGN(data->element_size, 8);
	/* the ring can store 64 iv entries.
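	 * Each slot is aligned_element_size bytes; amdgpu_ras_interrupt_dispatch()
	 * copies an entry in at wptr and amdgpu_ras_interrupt_handler() consumes
	 * it at rptr, both advancing modulo ring_size.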
	 */
	data->ring_size = 64 * data->aligned_element_size;
	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
	if (!data->ring) {
		put_obj(obj);
		return -ENOMEM;
	}

	/* IH is ready */
	data->inuse = 1;

	return 0;
}

static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
	}

	return 0;
}
/* ih end */

/* traverse all IPs except NBIO to query error counters */
static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		/*
		 * The PCIE_BIF IP has a different isr by ras controller
		 * interrupt; the specific ras counter query will be
		 * done in that isr. So skip such block from the common
		 * sync flood interrupt isr calling.
		 */
		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
			continue;

		/*
		 * this is a workaround for aldebaran: skip sending the msg
		 * to smu to get the ecc_info table, because smu handling of
		 * the get-ecc-info table temporarily fails.
		 * should be removed once smu fixes ecc_info table handling.
		 */
		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
			continue;

		amdgpu_ras_query_error_status(adev, &info);

		if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
			if (amdgpu_ras_reset_error_status(adev, info.head.block))
				dev_warn(adev->dev, "Failed to reset error counter and error status");
		}
	}
}

/* Parse RdRspStatus and WrRspStatus */
static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
					  struct ras_query_if *info)
{
	struct amdgpu_ras_block_object *block_obj;
	/*
	 * Only two blocks need to query read/write
	 * RspStatus at current state
	 */
	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
	    (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
		return;

	block_obj = amdgpu_ras_get_ras_block(adev,
					     info->head.block,
					     info->head.sub_block_index);

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return;
	}

	if (block_obj->hw_ops->query_ras_error_status)
		block_obj->hw_ops->query_ras_error_status(adev);

}

static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		amdgpu_ras_error_status_query(adev, &info);
	}
}

/* recovery begin */

/* return 0 on success.
 * caller needs to free bps.
/* Return 0 on success.
 * The caller must free @bps.
 */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int i = 0;
	int ret = 0, status;

	if (!con || !con->eh_data || !bps || !count)
		return -EINVAL;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data || data->count == 0) {
		*bps = NULL;
		ret = -EINVAL;
		goto out;
	}

	*bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL);
	if (!*bps) {
		ret = -ENOMEM;
		goto out;
	}

	for (; i < data->count; i++) {
		(*bps)[i] = (struct ras_badpage){
			.bp = data->bps[i].retired_page,
			.size = AMDGPU_GPU_PAGE_SIZE,
			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
		};
		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
				data->bps[i].retired_page);
		if (status == -EBUSY)
			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
		else if (status == -ENOENT)
			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
	}

	*count = data->count;
out:
	mutex_unlock(&con->recovery_lock);
	return ret;
}
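/*
 * Note on the flags filled in above (this matches the legend used by the
 * gpu_vram_bad_pages debug node defined elsewhere in this driver; the
 * node name is stated here from memory, not from the surrounding text):
 * a page is RESERVED once it has been successfully held back from
 * allocation, PENDING while the reservation has not happened yet, and
 * FAULT when the reservation itself failed.
 */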
static void amdgpu_ras_do_recovery(struct work_struct *work)
{
	struct amdgpu_ras *ras =
		container_of(work, struct amdgpu_ras, recovery_work);
	struct amdgpu_device *remote_adev = NULL;
	struct amdgpu_device *adev = ras->adev;
	struct list_head device_list, *device_list_handle = NULL;

	if (!ras->disable_ras_err_cnt_harvest) {
		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);

		/* Build the list of devices to query RAS related errors */
		if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
			device_list_handle = &hive->device_list;
		} else {
			INIT_LIST_HEAD(&device_list);
			list_add_tail(&adev->gmc.xgmi.head, &device_list);
			device_list_handle = &device_list;
		}

		list_for_each_entry(remote_adev,
				device_list_handle, gmc.xgmi.head) {
			amdgpu_ras_query_err_status(remote_adev);
			amdgpu_ras_log_on_err_counter(remote_adev);
		}

		amdgpu_put_xgmi_hive(hive);
	}

	if (amdgpu_device_should_recover_gpu(ras->adev)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;

		/* Perform a full reset in fatal error mode */
		if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		else
			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
	}
	atomic_set(&ras->in_recovery, 0);
}

/* alloc/realloc the bps array */
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
		struct ras_err_handler_data *data, int pages)
{
	unsigned int old_space = data->count + data->space_left;
	unsigned int new_space = old_space + pages;
	/* grow in chunks of 512 records to limit reallocations */
	unsigned int align_space = ALIGN(new_space, 512);
	void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);

	if (!bps)
		return -ENOMEM;

	if (data->bps) {
		memcpy(bps, data->bps,
				data->count * sizeof(*data->bps));
		kfree(data->bps);
	}

	data->bps = bps;
	data->space_left += align_space - old_space;
	return 0;
}

/* it deals with vram only */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
		struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int ret = 0;
	uint32_t i;

	if (!con || !con->eh_data || !bps || pages <= 0)
		return 0;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;

	for (i = 0; i < pages; i++) {
		if (amdgpu_ras_check_bad_page_unlock(con,
			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
			continue;

		if (!data->space_left &&
		    amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
			ret = -ENOMEM;
			goto out;
		}

		amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
			AMDGPU_GPU_PAGE_SIZE);

		memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
		data->count++;
		data->space_left--;
	}
out:
	mutex_unlock(&con->recovery_lock);

	return ret;
}

/*
 * Write the error record array to the eeprom; the function
 * should be protected by recovery_lock.
 * new_cnt: number of newly added UE records, excluding reserved bad
 * pages; may be NULL.
 */
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
			      unsigned long *new_cnt)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	struct amdgpu_ras_eeprom_control *control;
	int save_count;

	if (!con || !con->eh_data) {
		if (new_cnt)
			*new_cnt = 0;

		return 0;
	}

	mutex_lock(&con->recovery_lock);
	control = &con->eeprom_control;
	data = con->eh_data;
	save_count = data->count - control->ras_num_recs;
	mutex_unlock(&con->recovery_lock);

	if (new_cnt)
		*new_cnt = save_count / adev->umc.retire_unit;

	/* only new entries are saved */
	if (save_count > 0) {
		if (amdgpu_ras_eeprom_append(control,
					     &data->bps[control->ras_num_recs],
					     save_count)) {
			dev_err(adev->dev, "Failed to save EEPROM table data!");
			return -EIO;
		}

		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
	}

	return 0;
}

/*
 * Read the error record array from the eeprom and reserve enough space
 * for storing the new bad pages.
 */
static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras_eeprom_control *control =
		&adev->psp.ras_context.ras->eeprom_control;
	struct eeprom_table_record *bps;
	int ret;

	/* no bad page record, skip eeprom access */
	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
		return 0;

	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
	if (!bps)
		return -ENOMEM;

	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
	if (ret)
		dev_err(adev->dev, "Failed to load EEPROM table records!");
	else
		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);

	kfree(bps);
	return ret;
}

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
					     uint64_t addr)
{
	struct ras_err_handler_data *data = con->eh_data;
	int i;

	addr >>= AMDGPU_GPU_PAGE_SHIFT;
	for (i = 0; i < data->count; i++)
		if (addr == data->bps[i].retired_page)
			return true;

	return false;
}
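/*
 * Worked example of the page arithmetic used above, assuming
 * AMDGPU_GPU_PAGE_SHIFT is 12 (4 KiB GPU pages, the usual configuration):
 * a faulting byte address of 0x12345678 shifts down to retired_page
 * 0x12345, and reserving that record back in VRAM covers bytes
 * 0x12345000..0x12345fff, i.e. retired_page << AMDGPU_GPU_PAGE_SHIFT for
 * one AMDGPU_GPU_PAGE_SIZE.
 */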
/*
 * Check whether an address belongs to a bad page.
 *
 * Note: this check is only for the umc block.
 */
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				      uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	bool ret = false;

	if (!con || !con->eh_data)
		return ret;

	mutex_lock(&con->recovery_lock);
	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
	mutex_unlock(&con->recovery_lock);
	return ret;
}

static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
					  uint32_t max_count)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	/*
	 * Justification of the bad_page_cnt_threshold value in the ras
	 * structure.
	 *
	 * Generally, 0 <= amdgpu_bad_page_threshold <= the max record
	 * length in the eeprom, or amdgpu_bad_page_threshold is -2,
	 * which gives two scenarios:
	 *
	 * Bad page retirement enabled:
	 * - If amdgpu_bad_page_threshold = -2, bad_page_cnt_threshold
	 *   is derived from the VRAM size by the formula below.
	 *
	 * - If the value from the user satisfies
	 *   0 < amdgpu_bad_page_threshold < the max record length in the
	 *   eeprom, use it directly.
	 *
	 * Bad page retirement disabled:
	 * - If amdgpu_bad_page_threshold = 0, bad page retirement is
	 *   disabled and bad_page_cnt_threshold has no effect.
	 */

	if (amdgpu_bad_page_threshold < 0) {
		u64 val = adev->gmc.mc_vram_size;

		do_div(val, RAS_BAD_PAGE_COVER);
		con->bad_page_cnt_threshold = min(lower_32_bits(val),
						  max_count);
	} else {
		con->bad_page_cnt_threshold = min_t(int, max_count,
						    amdgpu_bad_page_threshold);
	}
}
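/*
 * Worked example for the default (-2) case above: with 16 GiB of VRAM
 * and RAS_BAD_PAGE_COVER at 100 MiB of VRAM per bad page, the formula
 * gives 17179869184 / 104857600 = 163 (integer division), so
 * bad_page_cnt_threshold becomes min(163, max_count).
 */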
int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data **data;
	u32 max_eeprom_records_count = 0;
	bool exc_err_limit = false;
	int ret;

	if (!con || amdgpu_sriov_vf(adev))
		return 0;

	/* Allow access to the RAS EEPROM via debugfs, when the ASIC
	 * supports RAS and debugfs is enabled, but when
	 * adev->ras_enabled is unset, i.e. when the "ras_enable"
	 * module parameter is set to 0.
	 */
	con->adev = adev;

	if (!adev->ras_enabled)
		return 0;

	data = &con->eh_data;
	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!*data) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_init(&con->recovery_lock);
	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
	atomic_set(&con->in_recovery, 0);
	con->eeprom_control.bad_channel_bitmap = 0;

	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count();
	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);

	/* Todo: during tests the SMU might fail to read the eeprom
	 * through I2C when the GPU is pending on an XGMI reset during
	 * probe time (mostly after the second bus reset); skip it for now.
	 */
	if (adev->gmc.xgmi.pending_reset)
		return 0;
	ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
	/*
	 * This call fails either when exc_err_limit is true or
	 * when ret != 0.
	 */
	if (exc_err_limit || ret)
		goto free;

	if (con->eeprom_control.ras_num_recs) {
		ret = amdgpu_ras_load_bad_pages(adev);
		if (ret)
			goto free;

		amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);

		if (con->update_channel_flag) {
			amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
			con->update_channel_flag = false;
		}
	}

#ifdef CONFIG_X86_MCE_AMD
	if ((adev->asic_type == CHIP_ALDEBARAN) &&
	    (adev->gmc.xgmi.connected_to_cpu))
		amdgpu_register_bad_pages_mca_notifier(adev);
#endif
	return 0;

free:
	kfree((*data)->bps);
	kfree(*data);
	con->eh_data = NULL;
out:
	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);

	/*
	 * Except for the error-threshold-exceeded case, other failures
	 * in this function do not fail the amdgpu driver init.
	 */
	if (!exc_err_limit)
		ret = 0;
	else
		ret = -EINVAL;

	return ret;
}

static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data = con->eh_data;

	/* recovery_init failed to initialize it, so fini is pointless */
	if (!data)
		return 0;

	cancel_work_sync(&con->recovery_work);

	mutex_lock(&con->recovery_lock);
	con->eh_data = NULL;
	kfree(data->bps);
	kfree(data);
	mutex_unlock(&con->recovery_lock);

	return 0;
}
/* recovery end */

static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		switch (adev->ip_versions[MP0_HWIP][0]) {
		case IP_VERSION(13, 0, 2):
			return true;
		default:
			return false;
		}
	}

	if (adev->asic_type == CHIP_IP_DISCOVERY) {
		switch (adev->ip_versions[MP0_HWIP][0]) {
		case IP_VERSION(13, 0, 0):
		case IP_VERSION(13, 0, 10):
			return true;
		default:
			return false;
		}
	}

	return adev->asic_type == CHIP_VEGA10 ||
		adev->asic_type == CHIP_VEGA20 ||
		adev->asic_type == CHIP_ARCTURUS ||
		adev->asic_type == CHIP_ALDEBARAN ||
		adev->asic_type == CHIP_SIENNA_CICHLID;
}

/*
 * This is a workaround for the vega20 workstation sku:
 * force enable gfx ras and ignore the vbios gfx ras flag,
 * since GC EDC can not be written there.
 */
static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;

	if (!ctx)
		return;

	if (strnstr(ctx->vbios_version, "D16406",
		    sizeof(ctx->vbios_version)) ||
	    strnstr(ctx->vbios_version, "D36002",
		    sizeof(ctx->vbios_version)))
		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
}
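/*
 * Illustrative note on the mask arithmetic used below: each RAS block
 * contributes one bit (1 << AMDGPU_RAS_BLOCK__*) to adev->ras_hw_enabled,
 * and the user-visible adev->ras_enabled is that mask filtered by the
 * amdgpu_ras_mask module parameter. For example, with only UMC and DF
 * supported, ras_hw_enabled is
 * (1 << AMDGPU_RAS_BLOCK__UMC) | (1 << AMDGPU_RAS_BLOCK__DF), and
 * booting with a ras_mask that clears the DF bit leaves only the UMC
 * bit set in ras_enabled.
 */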
/*
 * Check the hardware's ras ability, which is saved in hw_supported.
 * If the hardware does not support ras, we can skip some ras
 * initialization and forbid ras operations from the IPs.
 * Software itself (say, a boot parameter) may also limit the ras
 * ability; in that case we still need to allow the IPs to do some
 * limited operations, like disable. So we have to initialize ras as
 * normal, but check in each function whether the operation is allowed.
 */
static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
{
	adev->ras_hw_enabled = adev->ras_enabled = 0;

	if (!adev->is_atom_fw ||
	    !amdgpu_ras_asic_supported(adev))
		return;

	if (!adev->gmc.xgmi.connected_to_cpu) {
		if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
			dev_info(adev->dev, "MEM ECC is active.\n");
			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
						 1 << AMDGPU_RAS_BLOCK__DF);
		} else {
			dev_info(adev->dev, "MEM ECC is not present.\n");
		}

		if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
			dev_info(adev->dev, "SRAM ECC is active.\n");
			if (!amdgpu_sriov_vf(adev))
				adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
							  1 << AMDGPU_RAS_BLOCK__DF);
			else
				adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
							 1 << AMDGPU_RAS_BLOCK__SDMA |
							 1 << AMDGPU_RAS_BLOCK__GFX);

			/* VCN/JPEG RAS can be supported on both bare metal
			 * and SRIOV environments
			 */
			if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
			    adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
				adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
							 1 << AMDGPU_RAS_BLOCK__JPEG);
			else
				adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
							  1 << AMDGPU_RAS_BLOCK__JPEG);

			/*
			 * XGMI RAS is not supported if the number of xgmi
			 * physical nodes is zero.
			 */
			if (!adev->gmc.xgmi.num_physical_nodes)
				adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
		} else {
			dev_info(adev->dev, "SRAM ECC is not present.\n");
		}
	} else {
		/* The driver only manages the ras features of a few IP
		 * blocks when the GPU is connected to the CPU through XGMI.
		 */
		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
					 1 << AMDGPU_RAS_BLOCK__SDMA |
					 1 << AMDGPU_RAS_BLOCK__MMHUB);
	}

	amdgpu_ras_get_quirks(adev);

	/* hw_supported needs to be aligned with the RAS block mask. */
	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;

	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
		adev->ras_hw_enabled & amdgpu_ras_mask;
}
static void amdgpu_ras_counte_dw(struct work_struct *work)
{
	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
					      ras_counte_delay_work.work);
	struct amdgpu_device *adev = con->adev;
	struct drm_device *dev = adev_to_drm(adev);
	unsigned long ce_count, ue_count;
	int res;

	res = pm_runtime_get_sync(dev->dev);
	if (res < 0)
		goto Out;

	/* Cache the new values. */
	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
		atomic_set(&con->ras_ce_count, ce_count);
		atomic_set(&con->ras_ue_count, ue_count);
	}

	pm_runtime_mark_last_busy(dev->dev);
Out:
	pm_runtime_put_autosuspend(dev->dev);
}

static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	bool df_poison, umc_poison;

	/* the poison setting is useless on an SRIOV guest */
	if (amdgpu_sriov_vf(adev) || !con)
		return;

	/* Init the poison supported flag; the default value is false */
	if (adev->gmc.xgmi.connected_to_cpu) {
		/* enabled by default when the GPU is connected to the CPU */
		con->poison_supported = true;
	} else if (adev->df.funcs &&
		   adev->df.funcs->query_ras_poison_mode &&
		   adev->umc.ras &&
		   adev->umc.ras->query_ras_poison_mode) {
		df_poison =
			adev->df.funcs->query_ras_poison_mode(adev);
		umc_poison =
			adev->umc.ras->query_ras_poison_mode(adev);

		/* Only if poison is set in both DF and UMC can we support it */
		if (df_poison && umc_poison)
			con->poison_supported = true;
		else if (df_poison != umc_poison)
			dev_warn(adev->dev,
				 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
				 df_poison, umc_poison);
	}
}
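/*
 * Summary of the decision above (restating the code, not new policy):
 *
 *	df_poison	umc_poison	result
 *	true		true		poison_supported = true
 *	false		false		stays false (fatal-error mode)
 *	true/false	false/true	stays false, warning printed
 *
 * When poison mode is unsupported, a consumed error escalates to a full
 * GPU reset; see AMDGPU_NEED_FULL_RESET in amdgpu_ras_do_recovery().
 */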
int amdgpu_ras_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int r;

	if (con)
		return 0;

	con = kzalloc(sizeof(struct amdgpu_ras) +
		      sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
		      sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
		      GFP_KERNEL);
	if (!con)
		return -ENOMEM;

	con->adev = adev;
	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
	atomic_set(&con->ras_ce_count, 0);
	atomic_set(&con->ras_ue_count, 0);

	/* the ras_manager array for all blocks lives right behind con */
	con->objs = (struct ras_manager *)(con + 1);

	amdgpu_ras_set_context(adev, con);

	amdgpu_ras_check_supported(adev);

	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
		/* Set the gfx block ras context feature for VEGA20 Gaming;
		 * send the ras disable cmd to the ras ta during ras late init.
		 */
		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);

			return 0;
		}

		r = 0;
		goto release_con;
	}

	con->update_channel_flag = false;
	con->features = 0;
	INIT_LIST_HEAD(&con->head);
	/* We might need to get this flag from the vbios. */
	con->flags = RAS_DEFAULT_FLAGS;

	/* Initialize the nbio ras function ahead of any other ras
	 * functions, so the hardware fatal error interrupt can be
	 * enabled as early as possible.
	 */
	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->nbio.ras = &nbio_v7_4_ras;
		break;
	case IP_VERSION(4, 3, 0):
		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
			/* Unlike other generations of nbio ras, nbio v4_3
			 * only supports the fatal error interrupt, which
			 * informs software that DF is frozen due to a
			 * system fatal error event. The driver should not
			 * enable nbio ras in such a case. Instead,
			 * check DF RAS.
			 */
			adev->nbio.ras = &nbio_v4_3_ras;
		break;
	default:
		/* nbio ras is not available */
		break;
	}

	/* The nbio ras block needs to be enabled ahead of the other ras
	 * blocks to handle fatal errors.
	 */
	r = amdgpu_nbio_ras_sw_init(adev);
	if (r)
		goto release_con;

	if (adev->nbio.ras &&
	    adev->nbio.ras->init_ras_controller_interrupt) {
		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
		if (r)
			goto release_con;
	}

	if (adev->nbio.ras &&
	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
		if (r)
			goto release_con;
	}

	amdgpu_ras_query_poison_mode(adev);

	if (amdgpu_ras_fs_init(adev)) {
		r = -EINVAL;
		goto release_con;
	}

	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
		 "hardware ability[%x] ras_mask[%x]\n",
		 adev->ras_hw_enabled, adev->ras_enabled);

	return 0;
release_con:
	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return r;
}

int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
{
	if (adev->gmc.xgmi.connected_to_cpu)
		return 1;
	return 0;
}

static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
					    struct ras_common_if *ras_block)
{
	struct ras_query_if info = {
		.head = *ras_block,
	};

	if (!amdgpu_persistent_edc_harvesting_supported(adev))
		return 0;

	if (amdgpu_ras_query_error_status(adev, &info) != 0)
		DRM_WARN("RAS init harvest failure");

	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
		DRM_WARN("RAS init harvest reset failure");

	return 0;
}

bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return false;

	return con->poison_supported;
}
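/*
 * Note on the late-init dispatch: an IP block may supply its own
 * ras_late_init hook in its amdgpu_ras_block_object; blocks that need no
 * special handling can leave it NULL, in which case amdgpu_ras_late_init()
 * (further down in this file) falls back to
 * amdgpu_ras_block_late_init_default(), i.e. the common helper below.
 */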
/* helper function to handle common stuff in the ip late init phase */
int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
			       struct ras_common_if *ras_block)
{
	struct amdgpu_ras_block_object *ras_obj = NULL;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_query_if *query_info;
	unsigned long ue_count, ce_count;
	int r;

	/* disable RAS feature per IP block if it is not supported */
	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
		return 0;
	}

	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
	if (r) {
		if (adev->in_suspend || amdgpu_in_reset(adev)) {
			/* In the resume phase, if we fail to enable ras,
			 * clean up all ras fs nodes and disable ras.
			 */
			goto cleanup;
		} else
			return r;
	}

	/* check for errors on warm reset for ASICs that support
	 * persistent edc harvesting
	 */
	amdgpu_persistent_edc_harvesting(adev, ras_block);

	/* in the resume phase, no need to create ras fs nodes */
	if (adev->in_suspend || amdgpu_in_reset(adev))
		return 0;

	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
	    (ras_obj->hw_ops->query_poison_status ||
	     ras_obj->hw_ops->handle_poison_consumption))) {
		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
		if (r)
			goto cleanup;
	}

	r = amdgpu_ras_sysfs_create(adev, ras_block);
	if (r)
		goto interrupt;

	/* These are the cached values at init. */
	query_info = kzalloc(sizeof(struct ras_query_if), GFP_KERNEL);
	if (!query_info) {
		r = -ENOMEM;
		goto sysfs;
	}
	memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));

	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
		atomic_set(&con->ras_ce_count, ce_count);
		atomic_set(&con->ras_ue_count, ue_count);
	}

	kfree(query_info);
	return 0;

sysfs:
	amdgpu_ras_sysfs_remove(adev, ras_block);
interrupt:
	if (ras_obj->ras_cb)
		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
cleanup:
	amdgpu_ras_feature_enable(adev, ras_block, 0);
	return r;
}

static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
					      struct ras_common_if *ras_block)
{
	return amdgpu_ras_block_late_init(adev, ras_block);
}

/* helper function to remove the ras fs node and interrupt handler */
void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
				struct ras_common_if *ras_block)
{
	struct amdgpu_ras_block_object *ras_obj;

	if (!ras_block)
		return;

	amdgpu_ras_sysfs_remove(adev, ras_block);

	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
	if (ras_obj->ras_cb)
		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
}

static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
					       struct ras_common_if *ras_block)
{
	return amdgpu_ras_block_late_fini(adev, ras_block);
}

/* Do some init work after IP late init, as a dependence.
 * It runs in the resume/gpu reset/booting up cases.
 */
void amdgpu_ras_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	if (!adev->ras_enabled || !con) {
		/* clean the ras context for VEGA20 Gaming after the ras
		 * disable cmd has been sent
		 */
		amdgpu_release_ras_context(adev);

		return;
	}

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		/* Set up all the other IPs which are not implemented.
		 * The tricky part is that each IP's actual ras error type
		 * should be MULTI_UNCORRECTABLE, but since the driver does
		 * not handle it, ERROR_NONE makes sense anyway.
		 */
		amdgpu_ras_enable_all_features(adev, 1);

		/* We enable ras on all hw_supported blocks, but the boot
		 * parameter might disable some of them, and one or more IPs
		 * may not be implemented yet. So we disable those on their
		 * behalf.
		 */
		list_for_each_entry_safe(obj, tmp, &con->head, node) {
			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
				amdgpu_ras_feature_enable(adev, &obj->head, 0);
				/* there should not be any reference left */
				WARN_ON(alive_obj(obj));
			}
		}
	}
}

void amdgpu_ras_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return;

	amdgpu_ras_disable_all_features(adev, 0);
	/* Make sure all ras objects are disabled. */
	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);
}
int amdgpu_ras_late_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;
	int r;

	/* The guest side doesn't need to init the ras feature */
	if (amdgpu_sriov_vf(adev))
		return 0;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_late_init) {
			r = obj->ras_late_init(adev, &obj->ras_comm);
			if (r) {
				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
					obj->ras_comm.name, r);
				return r;
			}
		} else
			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
	}

	return 0;
}

/* do some fini work before IP fini, as a dependence */
int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return 0;

	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
	if (con->features)
		amdgpu_ras_disable_all_features(adev, 0);
	amdgpu_ras_recovery_fini(adev);
	return 0;
}

int amdgpu_ras_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras_block_list *ras_node, *tmp;
	struct amdgpu_ras_block_object *obj = NULL;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return 0;

	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
		if (ras_node->ras_obj) {
			obj = ras_node->ras_obj;
			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
			    obj->ras_fini)
				obj->ras_fini(adev, &obj->ras_comm);
			else
				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
		}

		/* Clear ras blocks from ras_list and free the ras block list node */
		list_del(&ras_node->node);
		kfree(ras_node);
	}

	amdgpu_ras_fs_fini(adev);
	amdgpu_ras_interrupt_remove_all(adev);

	WARN(con->features, "Feature mask is not cleared");

	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);

	cancel_delayed_work_sync(&con->ras_counte_delay_work);

	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return 0;
}

void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
	amdgpu_ras_check_supported(adev);
	if (!adev->ras_hw_enabled)
		return;

	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
		dev_info(adev->dev, "uncorrectable hardware error "
			 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");

		amdgpu_ras_reset_gpu(adev);
	}
}

bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20 &&
	    adev->pm.fw_version <= 0x283400) {
		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
			amdgpu_ras_intr_triggered();
	}

	return false;
}

void amdgpu_release_ras_context(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return;

	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
		amdgpu_ras_set_context(adev, NULL);
		kfree(con);
	}
}

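/*
 * Overview of the CONFIG_X86_MCE_AMD path below (restating the flow of
 * this file): when the host reports an MCE in a GPU UMC bank, the
 * notifier decodes the MCA IPID register to find the reporting GPU (by
 * XGMI physical node id), the UMC instance and the channel index, then
 * hands the error address to the UMC page retirement code.
 */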
#ifdef CONFIG_X86_MCE_AMD
static struct amdgpu_device *find_adev(uint32_t node_id)
{
	int i;
	struct amdgpu_device *adev = NULL;

	for (i = 0; i < mce_adev_list.num_gpu; i++) {
		adev = mce_adev_list.devs[i];

		if (adev && adev->gmc.xgmi.connected_to_cpu &&
		    adev->gmc.xgmi.physical_node_id == node_id)
			break;
		adev = NULL;
	}

	return adev;
}

#define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
#define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
#define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
#define GPU_ID_OFFSET		8
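/*
 * Worked example for the extraction macros above: with IPID bits
 * [47:44] holding the value 9, GET_MCA_IPID_GPUID() returns 9, and
 * subtracting GPU_ID_OFFSET (8) yields gpu_id 1, i.e. XGMI physical
 * node 1. GET_CHAN_INDEX() builds a 3-bit channel index from IPID bits
 * [13:12] (the low two bits) and IPID bit 20 (the high bit).
 */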
static int amdgpu_bad_page_notifier(struct notifier_block *nb,
				    unsigned long val, void *data)
{
	struct mce *m = (struct mce *)data;
	struct amdgpu_device *adev = NULL;
	uint32_t gpu_id = 0;
	uint32_t umc_inst = 0, ch_inst = 0;

	/*
	 * Only process the error if it was generated in UMC_V2, which
	 * belongs to the GPU UMCs, and occurred in DramECC (extended
	 * error code = 0); otherwise bail out.
	 */
	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
		    (XEC(m->status, 0x3f) == 0x0)))
		return NOTIFY_DONE;

	/*
	 * If it is a correctable error, return.
	 */
	if (mce_is_correctable(m))
		return NOTIFY_OK;

	/*
	 * The GPU Id is offset by GPU_ID_OFFSET in the MCA_IPID_UMC register.
	 */
	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;

	adev = find_adev(gpu_id);
	if (!adev) {
		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
			 gpu_id);
		return NOTIFY_DONE;
	}

	/*
	 * If it is an uncorrectable error, then find out the UMC instance
	 * and channel index.
	 */
	umc_inst = GET_UMC_INST(m->ipid);
	ch_inst = GET_CHAN_INDEX(m->ipid);

	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
		 umc_inst, ch_inst);

	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
		return NOTIFY_OK;
	else
		return NOTIFY_DONE;
}

static struct notifier_block amdgpu_bad_page_nb = {
	.notifier_call	= amdgpu_bad_page_notifier,
	.priority	= MCE_PRIO_UC,
};

static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
{
	/*
	 * Add the adev to the mce_adev_list.
	 * During a mode2 reset, the amdgpu device is temporarily
	 * removed from the mgpu_info list, which can cause
	 * page retirement to fail.
	 * Use this list instead of mgpu_info to find the amdgpu
	 * device on which the UMC error was reported.
	 */
	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;

	/*
	 * Register the x86 notifier only once
	 * with the MCE subsystem.
	 */
	if (!notifier_registered) {
		mce_register_decode_chain(&amdgpu_bad_page_nb);
		notifier_registered = true;
	}
}
#endif

struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
{
	if (!adev)
		return NULL;

	return adev->psp.ras_context.ras;
}

int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
{
	if (!adev)
		return -EINVAL;

	adev->psp.ras_context.ras = ras_con;
	return 0;
}

/* check if ras is supported on a block, say, sdma or gfx */
int amdgpu_ras_is_supported(struct amdgpu_device *adev,
			    unsigned int block)
{
	int ret = 0;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (block >= AMDGPU_RAS_BLOCK_COUNT)
		return 0;

	ret = ras && (adev->ras_enabled & (1 << block));

	/* For the special asic with mem ecc enabled but sram ecc
	 * not enabled: even if the ras block is not marked supported
	 * in .ras_enabled, as long as the asic supports poison mode
	 * and the ras block has a ras configuration, the ras block
	 * can be considered to support the ras function.
	 */
	if (!ret &&
	    amdgpu_ras_is_poison_mode_supported(adev) &&
	    amdgpu_ras_get_ras_block(adev, block, 0))
		ret = 1;

	return ret;
}

int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	/* only one recovery can be scheduled at a time */
	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
	return 0;
}

/* Register each ip ras block into amdgpu ras */
int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
				  struct amdgpu_ras_block_object *ras_block_obj)
{
	struct amdgpu_ras_block_list *ras_node;

	if (!adev || !ras_block_obj)
		return -EINVAL;

	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
	if (!ras_node)
		return -ENOMEM;

	INIT_LIST_HEAD(&ras_node->node);
	ras_node->ras_obj = ras_block_obj;
	list_add_tail(&ras_node->node, &adev->ras_list);

	return 0;
}
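/*
 * Illustrative sketch only (the caller's variable and field names are an
 * assumption, not taken from this file): an IP block typically registers
 * its RAS object during sw_init, roughly like:
 *
 *	struct amdgpu_xxx_ras *ras = ...;
 *	int err;
 *
 *	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
 *	if (err)
 *		return err;
 *
 * After registration, amdgpu_ras_late_init() walks adev->ras_list and
 * invokes each block's ras_late_init hook (or the common default).
 */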