/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include <linux/list_sort.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbif_v6_3_1.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
	"ih",
	"mpio",
	"mmsch",
};

const char *ras_mca_block_string[] = {
	"mca_mp0",
	"mca_mp1",
	"mca_mpio",
	"mca_iohc",
};

struct amdgpu_ras_block_list {
	/* ras block link */
	struct list_head node;

	struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (!ras_block)
		return "NULL";

	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
	    ras_block->block >= ARRAY_SIZE(ras_block_string))
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ?
ras_block_string[_BLOCK_] : "Out Of Range") 114 115 #define ras_err_str(i) (ras_error_string[ffs(i)]) 116 117 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) 118 119 /* inject address is 52 bits */ 120 #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) 121 122 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */ 123 #define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL) 124 125 #define MAX_UMC_POISON_POLLING_TIME_ASYNC 300 //ms 126 127 #define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms 128 129 #define MAX_FLUSH_RETIRE_DWORK_TIMES 100 130 131 #define BYPASS_ALLOCATED_ADDRESS 0x0 132 #define BYPASS_INITIALIZATION_ADDRESS 0x1 133 134 enum amdgpu_ras_retire_page_reservation { 135 AMDGPU_RAS_RETIRE_PAGE_RESERVED, 136 AMDGPU_RAS_RETIRE_PAGE_PENDING, 137 AMDGPU_RAS_RETIRE_PAGE_FAULT, 138 }; 139 140 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); 141 142 static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, 143 uint64_t addr); 144 static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev, 145 uint64_t addr); 146 147 static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev); 148 static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev); 149 150 #ifdef CONFIG_X86_MCE_AMD 151 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev); 152 struct mce_notifier_adev_list { 153 struct amdgpu_device *devs[MAX_GPU_INSTANCE]; 154 int num_gpu; 155 }; 156 static struct mce_notifier_adev_list mce_adev_list; 157 #endif 158 159 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready) 160 { 161 if (adev && amdgpu_ras_get_context(adev)) 162 amdgpu_ras_get_context(adev)->error_query_ready = ready; 163 } 164 165 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev) 166 { 167 if (adev && amdgpu_ras_get_context(adev)) 168 return amdgpu_ras_get_context(adev)->error_query_ready; 169 170 return false; 171 } 172 173 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address) 174 { 175 struct ras_err_data err_data; 176 struct eeprom_table_record err_rec; 177 int ret; 178 179 ret = amdgpu_ras_check_bad_page(adev, address); 180 if (ret == -EINVAL) { 181 dev_warn(adev->dev, 182 "RAS WARN: input address 0x%llx is invalid.\n", 183 address); 184 return -EINVAL; 185 } else if (ret == 1) { 186 dev_warn(adev->dev, 187 "RAS WARN: 0x%llx has already been marked as bad page!\n", 188 address); 189 return 0; 190 } 191 192 ret = amdgpu_ras_error_data_init(&err_data); 193 if (ret) 194 return ret; 195 196 memset(&err_rec, 0x0, sizeof(struct eeprom_table_record)); 197 err_data.err_addr = &err_rec; 198 amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0); 199 200 if (amdgpu_bad_page_threshold != 0) { 201 amdgpu_ras_add_bad_pages(adev, err_data.err_addr, 202 err_data.err_addr_cnt, false); 203 amdgpu_ras_save_bad_pages(adev, NULL); 204 } 205 206 amdgpu_ras_error_data_fini(&err_data); 207 208 dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n"); 209 dev_warn(adev->dev, "Clear EEPROM:\n"); 210 dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n"); 211 212 return 0; 213 } 214 215 static int amdgpu_check_address_validity(struct amdgpu_device *adev, 216 uint64_t address, uint64_t flags) 217 { 218 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 219 struct amdgpu_vram_block_info blk_info; 220 uint64_t page_pfns[32] = {0}; 221 int i, ret, count; 222 223 if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) 224 return 0; 
225 226 if ((address >= adev->gmc.mc_vram_size) || 227 (address >= RAS_UMC_INJECT_ADDR_LIMIT)) 228 return -EFAULT; 229 230 count = amdgpu_umc_lookup_bad_pages_in_a_row(adev, 231 address, page_pfns, ARRAY_SIZE(page_pfns)); 232 if (count <= 0) 233 return -EPERM; 234 235 for (i = 0; i < count; i++) { 236 memset(&blk_info, 0, sizeof(blk_info)); 237 ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr, 238 page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info); 239 if (!ret) { 240 /* The input address that needs to be checked is allocated by 241 * current calling process, so it is necessary to exclude 242 * the calling process. 243 */ 244 if ((flags == BYPASS_ALLOCATED_ADDRESS) && 245 ((blk_info.task.pid != task_pid_nr(current)) || 246 strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN))) 247 return -EACCES; 248 else if ((flags == BYPASS_INITIALIZATION_ADDRESS) && 249 (blk_info.task.pid == con->init_task_pid) && 250 !strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN)) 251 return -EACCES; 252 } 253 } 254 255 return 0; 256 } 257 258 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, 259 size_t size, loff_t *pos) 260 { 261 struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private; 262 struct ras_query_if info = { 263 .head = obj->head, 264 }; 265 ssize_t s; 266 char val[128]; 267 268 if (amdgpu_ras_query_error_status(obj->adev, &info)) 269 return -EINVAL; 270 271 /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */ 272 if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) && 273 amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) { 274 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) 275 dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); 276 } 277 278 s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n", 279 "ue", info.ue_count, 280 "ce", info.ce_count); 281 if (*pos >= s) 282 return 0; 283 284 s -= *pos; 285 s = min_t(u64, s, size); 286 287 288 if (copy_to_user(buf, &val[*pos], s)) 289 return -EINVAL; 290 291 *pos += s; 292 293 return s; 294 } 295 296 static const struct file_operations amdgpu_ras_debugfs_ops = { 297 .owner = THIS_MODULE, 298 .read = amdgpu_ras_debugfs_read, 299 .write = NULL, 300 .llseek = default_llseek 301 }; 302 303 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id) 304 { 305 int i; 306 307 for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) { 308 *block_id = i; 309 if (strcmp(name, ras_block_string[i]) == 0) 310 return 0; 311 } 312 return -EINVAL; 313 } 314 315 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, 316 const char __user *buf, size_t size, 317 loff_t *pos, struct ras_debug_if *data) 318 { 319 ssize_t s = min_t(u64, 64, size); 320 char str[65]; 321 char block_name[33]; 322 char err[9] = "ue"; 323 int op = -1; 324 int block_id; 325 uint32_t sub_block; 326 u64 address, value; 327 /* default value is 0 if the mask is not set by user */ 328 u32 instance_mask = 0; 329 330 if (*pos) 331 return -EINVAL; 332 *pos = size; 333 334 memset(str, 0, sizeof(str)); 335 memset(data, 0, sizeof(*data)); 336 337 if (copy_from_user(str, buf, s)) 338 return -EINVAL; 339 340 if (sscanf(str, "disable %32s", block_name) == 1) 341 op = 0; 342 else if (sscanf(str, "enable %32s %8s", block_name, err) == 2) 343 op = 1; 344 else if (sscanf(str, "inject %32s %8s", block_name, err) == 2) 345 op = 2; 346 else if (strstr(str, "retire_page") != NULL) 347 op = 3; 348 else if (strstr(str, 
"check_address") != NULL) 349 op = 4; 350 else if (str[0] && str[1] && str[2] && str[3]) 351 /* ascii string, but commands are not matched. */ 352 return -EINVAL; 353 354 if (op != -1) { 355 if (op == 3) { 356 if (sscanf(str, "%*s 0x%llx", &address) != 1 && 357 sscanf(str, "%*s %llu", &address) != 1) 358 return -EINVAL; 359 360 data->op = op; 361 data->inject.address = address; 362 363 return 0; 364 } else if (op == 4) { 365 if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 && 366 sscanf(str, "%*s %llu %llu", &address, &value) != 2) 367 return -EINVAL; 368 369 data->op = op; 370 data->inject.address = address; 371 data->inject.value = value; 372 return 0; 373 } 374 375 if (amdgpu_ras_find_block_id_by_name(block_name, &block_id)) 376 return -EINVAL; 377 378 data->head.block = block_id; 379 /* only ue, ce and poison errors are supported */ 380 if (!memcmp("ue", err, 2)) 381 data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; 382 else if (!memcmp("ce", err, 2)) 383 data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE; 384 else if (!memcmp("poison", err, 6)) 385 data->head.type = AMDGPU_RAS_ERROR__POISON; 386 else 387 return -EINVAL; 388 389 data->op = op; 390 391 if (op == 2) { 392 if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x", 393 &sub_block, &address, &value, &instance_mask) != 4 && 394 sscanf(str, "%*s %*s %*s %u %llu %llu %u", 395 &sub_block, &address, &value, &instance_mask) != 4 && 396 sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", 397 &sub_block, &address, &value) != 3 && 398 sscanf(str, "%*s %*s %*s %u %llu %llu", 399 &sub_block, &address, &value) != 3) 400 return -EINVAL; 401 data->head.sub_block_index = sub_block; 402 data->inject.address = address; 403 data->inject.value = value; 404 data->inject.instance_mask = instance_mask; 405 } 406 } else { 407 if (size < sizeof(*data)) 408 return -EINVAL; 409 410 if (copy_from_user(data, buf, sizeof(*data))) 411 return -EINVAL; 412 } 413 414 return 0; 415 } 416 417 static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev, 418 struct ras_debug_if *data) 419 { 420 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1; 421 uint32_t mask, inst_mask = data->inject.instance_mask; 422 423 /* no need to set instance mask if there is only one instance */ 424 if (num_xcc <= 1 && inst_mask) { 425 data->inject.instance_mask = 0; 426 dev_dbg(adev->dev, 427 "RAS inject mask(0x%x) isn't supported and force it to 0.\n", 428 inst_mask); 429 430 return; 431 } 432 433 switch (data->head.block) { 434 case AMDGPU_RAS_BLOCK__GFX: 435 mask = GENMASK(num_xcc - 1, 0); 436 break; 437 case AMDGPU_RAS_BLOCK__SDMA: 438 mask = GENMASK(adev->sdma.num_instances - 1, 0); 439 break; 440 case AMDGPU_RAS_BLOCK__VCN: 441 case AMDGPU_RAS_BLOCK__JPEG: 442 mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0); 443 break; 444 default: 445 mask = inst_mask; 446 break; 447 } 448 449 /* remove invalid bits in instance mask */ 450 data->inject.instance_mask &= mask; 451 if (inst_mask != data->inject.instance_mask) 452 dev_dbg(adev->dev, 453 "Adjust RAS inject mask 0x%x to 0x%x\n", 454 inst_mask, data->inject.instance_mask); 455 } 456 457 /** 458 * DOC: AMDGPU RAS debugfs control interface 459 * 460 * The control interface accepts struct ras_debug_if which has two members. 461 * 462 * First member: ras_debug_if::head or ras_debug_if::inject. 463 * 464 * head is used to indicate which IP block will be under control. 465 * 466 * head has four members, they are block, type, sub_block_index, name. 467 * block: which IP will be under control. 
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, say, GFX, sDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head, they are address, value and mask.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card which you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *	see ras_block_string[] for details
 *
 * The error type is one of: ue, ce and poison where,
 *	ue is multi-uncorrectable
 *	ce is single-correctable
 *	poison is poison
 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 * The mask is the instance mask; it is optional and the default value is 0x1.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
					     const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev, "RAS WARN: error injection "
				"currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	} else if (data.op == 4) {
		ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value);
		return ret ? ret : size;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		/* umc ce/ue error injection for a bad page is not allowed */
		if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
			ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
		if (ret == -EINVAL) {
			dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
				 data.inject.address);
			break;
		} else if (ret == 1) {
			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
				 data.inject.address);
			break;
		}

		amdgpu_ras_instance_mask_check(adev, &data);

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in vram. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
					       const char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM.
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *	ue: 0
 *	ce: 1
 *
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				  "ce", info.ce_count, "de", info.de_count);
	else
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				  "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0)) {
		list_del(&obj->node);
		amdgpu_ras_error_data_fini(&obj->err_data);
	}

	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return NULL;

		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
	} else
		obj = &con->objs[head->block];

	/* already exist. return obj?
 */
	if (alive_obj(obj))
		return NULL;

	if (amdgpu_ras_error_data_init(&obj->err_data))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
				return NULL;

			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
		} else
			obj = &con->objs[head->block];

		if (alive_obj(obj))
			return obj;
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj))
				return obj;
		}
	}

	return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
				       struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware supports ras, we can create the obj.
	 * The RAS framework checks con->hw_supported to see if it needs to do
	 * the corresponding initialization.
	 * The IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
			      struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	/* For non-gfx ip, do not enable ras feature if it is not allowed */
	/* For gfx ip, regardless of feature support status, */
	/* Force issue enable or disable ras feature commands */
	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	/* Only enable gfx ras feature from host side */
	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_sriov_vf(adev) &&
	    !amdgpu_ras_intr_triggered()) {
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}

		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable":"disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			kfree(info);
			return ret;
		}

		kfree(info);
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);

	return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
				      struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm to issue a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With old ras TA, we might fail to enable ras.
			 * Log it and just setup the object.
			 * TODO: remove this WA in the future.
929 */ 930 if (ret == -EINVAL) { 931 ret = __amdgpu_ras_feature_enable(adev, head, 1); 932 if (!ret) 933 dev_info(adev->dev, 934 "RAS INFO: %s setup object\n", 935 get_ras_block_str(head)); 936 } 937 } else { 938 /* setup the object then issue a ras TA disable cmd.*/ 939 ret = __amdgpu_ras_feature_enable(adev, head, 1); 940 if (ret) 941 return ret; 942 943 /* gfx block ras disable cmd must send to ras-ta */ 944 if (head->block == AMDGPU_RAS_BLOCK__GFX) 945 con->features |= BIT(head->block); 946 947 ret = amdgpu_ras_feature_enable(adev, head, 0); 948 949 /* clean gfx block ras features flag */ 950 if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX) 951 con->features &= ~BIT(head->block); 952 } 953 } else 954 ret = amdgpu_ras_feature_enable(adev, head, enable); 955 956 return ret; 957 } 958 959 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev, 960 bool bypass) 961 { 962 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 963 struct ras_manager *obj, *tmp; 964 965 list_for_each_entry_safe(obj, tmp, &con->head, node) { 966 /* bypass psp. 967 * aka just release the obj and corresponding flags 968 */ 969 if (bypass) { 970 if (__amdgpu_ras_feature_enable(adev, &obj->head, 0)) 971 break; 972 } else { 973 if (amdgpu_ras_feature_enable(adev, &obj->head, 0)) 974 break; 975 } 976 } 977 978 return con->features; 979 } 980 981 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, 982 bool bypass) 983 { 984 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 985 int i; 986 const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE; 987 988 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { 989 struct ras_common_if head = { 990 .block = i, 991 .type = default_ras_type, 992 .sub_block_index = 0, 993 }; 994 995 if (i == AMDGPU_RAS_BLOCK__MCA) 996 continue; 997 998 if (bypass) { 999 /* 1000 * bypass psp. vbios enable ras for us. 1001 * so just create the obj 1002 */ 1003 if (__amdgpu_ras_feature_enable(adev, &head, 1)) 1004 break; 1005 } else { 1006 if (amdgpu_ras_feature_enable(adev, &head, 1)) 1007 break; 1008 } 1009 } 1010 1011 for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { 1012 struct ras_common_if head = { 1013 .block = AMDGPU_RAS_BLOCK__MCA, 1014 .type = default_ras_type, 1015 .sub_block_index = i, 1016 }; 1017 1018 if (bypass) { 1019 /* 1020 * bypass psp. vbios enable ras for us. 
1021 * so just create the obj 1022 */ 1023 if (__amdgpu_ras_feature_enable(adev, &head, 1)) 1024 break; 1025 } else { 1026 if (amdgpu_ras_feature_enable(adev, &head, 1)) 1027 break; 1028 } 1029 } 1030 1031 return con->features; 1032 } 1033 /* feature ctl end */ 1034 1035 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj, 1036 enum amdgpu_ras_block block) 1037 { 1038 if (!block_obj) 1039 return -EINVAL; 1040 1041 if (block_obj->ras_comm.block == block) 1042 return 0; 1043 1044 return -EINVAL; 1045 } 1046 1047 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev, 1048 enum amdgpu_ras_block block, uint32_t sub_block_index) 1049 { 1050 struct amdgpu_ras_block_list *node, *tmp; 1051 struct amdgpu_ras_block_object *obj; 1052 1053 if (block >= AMDGPU_RAS_BLOCK__LAST) 1054 return NULL; 1055 1056 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { 1057 if (!node->ras_obj) { 1058 dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); 1059 continue; 1060 } 1061 1062 obj = node->ras_obj; 1063 if (obj->ras_block_match) { 1064 if (obj->ras_block_match(obj, block, sub_block_index) == 0) 1065 return obj; 1066 } else { 1067 if (amdgpu_ras_block_match_default(obj, block) == 0) 1068 return obj; 1069 } 1070 } 1071 1072 return NULL; 1073 } 1074 1075 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data) 1076 { 1077 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 1078 int ret = 0; 1079 1080 /* 1081 * choosing right query method according to 1082 * whether smu support query error information 1083 */ 1084 ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc)); 1085 if (ret == -EOPNOTSUPP) { 1086 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && 1087 adev->umc.ras->ras_block.hw_ops->query_ras_error_count) 1088 adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data); 1089 1090 /* umc query_ras_error_address is also responsible for clearing 1091 * error status 1092 */ 1093 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && 1094 adev->umc.ras->ras_block.hw_ops->query_ras_error_address) 1095 adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data); 1096 } else if (!ret) { 1097 if (adev->umc.ras && 1098 adev->umc.ras->ecc_info_query_ras_error_count) 1099 adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data); 1100 1101 if (adev->umc.ras && 1102 adev->umc.ras->ecc_info_query_ras_error_address) 1103 adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data); 1104 } 1105 } 1106 1107 static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, 1108 struct ras_manager *ras_mgr, 1109 struct ras_err_data *err_data, 1110 struct ras_query_context *qctx, 1111 const char *blk_name, 1112 bool is_ue, 1113 bool is_de) 1114 { 1115 struct amdgpu_smuio_mcm_config_info *mcm_info; 1116 struct ras_err_node *err_node; 1117 struct ras_err_info *err_info; 1118 u64 event_id = qctx->evid.event_id; 1119 1120 if (is_ue) { 1121 for_each_ras_error(err_node, err_data) { 1122 err_info = &err_node->err_info; 1123 mcm_info = &err_info->mcm_info; 1124 if (err_info->ue_count) { 1125 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " 1126 "%lld new uncorrectable hardware errors detected in %s block\n", 1127 mcm_info->socket_id, 1128 mcm_info->die_id, 1129 err_info->ue_count, 1130 blk_name); 1131 } 1132 } 1133 1134 for_each_ras_error(err_node, &ras_mgr->err_data) { 1135 err_info = &err_node->err_info; 1136 mcm_info = &err_info->mcm_info; 1137 
RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " 1138 "%lld uncorrectable hardware errors detected in total in %s block\n", 1139 mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name); 1140 } 1141 1142 } else { 1143 if (is_de) { 1144 for_each_ras_error(err_node, err_data) { 1145 err_info = &err_node->err_info; 1146 mcm_info = &err_info->mcm_info; 1147 if (err_info->de_count) { 1148 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " 1149 "%lld new deferred hardware errors detected in %s block\n", 1150 mcm_info->socket_id, 1151 mcm_info->die_id, 1152 err_info->de_count, 1153 blk_name); 1154 } 1155 } 1156 1157 for_each_ras_error(err_node, &ras_mgr->err_data) { 1158 err_info = &err_node->err_info; 1159 mcm_info = &err_info->mcm_info; 1160 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " 1161 "%lld deferred hardware errors detected in total in %s block\n", 1162 mcm_info->socket_id, mcm_info->die_id, 1163 err_info->de_count, blk_name); 1164 } 1165 } else { 1166 if (adev->debug_disable_ce_logs) 1167 return; 1168 1169 for_each_ras_error(err_node, err_data) { 1170 err_info = &err_node->err_info; 1171 mcm_info = &err_info->mcm_info; 1172 if (err_info->ce_count) { 1173 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " 1174 "%lld new correctable hardware errors detected in %s block\n", 1175 mcm_info->socket_id, 1176 mcm_info->die_id, 1177 err_info->ce_count, 1178 blk_name); 1179 } 1180 } 1181 1182 for_each_ras_error(err_node, &ras_mgr->err_data) { 1183 err_info = &err_node->err_info; 1184 mcm_info = &err_info->mcm_info; 1185 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " 1186 "%lld correctable hardware errors detected in total in %s block\n", 1187 mcm_info->socket_id, mcm_info->die_id, 1188 err_info->ce_count, blk_name); 1189 } 1190 } 1191 } 1192 } 1193 1194 static inline bool err_data_has_source_info(struct ras_err_data *data) 1195 { 1196 return !list_empty(&data->err_node_list); 1197 } 1198 1199 static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, 1200 struct ras_query_if *query_if, 1201 struct ras_err_data *err_data, 1202 struct ras_query_context *qctx) 1203 { 1204 struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head); 1205 const char *blk_name = get_ras_block_str(&query_if->head); 1206 u64 event_id = qctx->evid.event_id; 1207 1208 if (err_data->ce_count) { 1209 if (err_data_has_source_info(err_data)) { 1210 amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx, 1211 blk_name, false, false); 1212 } else if (!adev->aid_mask && 1213 adev->smuio.funcs && 1214 adev->smuio.funcs->get_socket_id && 1215 adev->smuio.funcs->get_die_id) { 1216 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d " 1217 "%ld correctable hardware errors " 1218 "detected in %s block\n", 1219 adev->smuio.funcs->get_socket_id(adev), 1220 adev->smuio.funcs->get_die_id(adev), 1221 ras_mgr->err_data.ce_count, 1222 blk_name); 1223 } else { 1224 RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors " 1225 "detected in %s block\n", 1226 ras_mgr->err_data.ce_count, 1227 blk_name); 1228 } 1229 } 1230 1231 if (err_data->ue_count) { 1232 if (err_data_has_source_info(err_data)) { 1233 amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx, 1234 blk_name, true, false); 1235 } else if (!adev->aid_mask && 1236 adev->smuio.funcs && 1237 adev->smuio.funcs->get_socket_id && 1238 adev->smuio.funcs->get_die_id) { 1239 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d " 1240 "%ld uncorrectable hardware errors " 1241 "detected in %s block\n", 1242 
				      adev->smuio.funcs->get_socket_id(adev),
				      adev->smuio.funcs->get_die_id(adev),
				      ras_mgr->err_data.ue_count,
				      blk_name);
		} else {
			RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
				      "detected in %s block\n",
				      ras_mgr->err_data.ue_count,
				      blk_name);
		}
	}

	if (err_data->de_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
							  blk_name, false, true);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
				      "%ld deferred hardware errors "
				      "detected in %s block\n",
				      adev->smuio.funcs->get_socket_id(adev),
				      adev->smuio.funcs->get_die_id(adev),
				      ras_mgr->err_data.de_count,
				      blk_name);
		} else {
			RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
				      "detected in %s block\n",
				      ras_mgr->err_data.de_count,
				      blk_name);
		}
	}
}

static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
						   struct ras_query_if *query_if,
						   struct ras_err_data *err_data,
						   struct ras_query_context *qctx)
{
	unsigned long new_ue, new_ce, new_de;
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
	const char *blk_name = get_ras_block_str(&query_if->head);
	u64 event_id = qctx->evid.event_id;

	new_ce = err_data->ce_count - obj->err_data.ce_count;
	new_ue = err_data->ue_count - obj->err_data.ue_count;
	new_de = err_data->de_count - obj->err_data.de_count;

	if (new_ce) {
		RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
			      "detected in %s block\n",
			      new_ce,
			      blk_name);
	}

	if (new_ue) {
		RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
			      "detected in %s block\n",
			      new_ue,
			      blk_name);
	}

	if (new_de) {
		RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
			      "detected in %s block\n",
			      new_de,
			      blk_name);
	}
}

static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (err_data_has_source_info(err_data)) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			amdgpu_ras_error_statistic_de_count(&obj->err_data,
					&err_info->mcm_info, err_info->de_count);
			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
					&err_info->mcm_info, err_info->ce_count);
			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
					&err_info->mcm_info, err_info->ue_count);
		}
	} else {
		/* for legacy asic path which doesn't have error source info */
		obj->err_data.ue_count += err_data->ue_count;
		obj->err_data.ce_count += err_data->ce_count;
		obj->err_data.de_count += err_data->de_count;
	}
}

static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
							      struct ras_err_data *err_data)
{
	/* Host reports absolute counts */
	obj->err_data.ue_count = err_data->ue_count;
	obj->err_data.ce_count = err_data->ce_count;
	obj->err_data.de_count = err_data->de_count;
}

static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
	struct ras_common_if
head; 1349 1350 memset(&head, 0, sizeof(head)); 1351 head.block = blk; 1352 1353 return amdgpu_ras_find_obj(adev, &head); 1354 } 1355 1356 int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk, 1357 const struct aca_info *aca_info, void *data) 1358 { 1359 struct ras_manager *obj; 1360 1361 /* in resume phase, no need to create aca fs node */ 1362 if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) 1363 return 0; 1364 1365 obj = get_ras_manager(adev, blk); 1366 if (!obj) 1367 return -EINVAL; 1368 1369 return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data); 1370 } 1371 1372 int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk) 1373 { 1374 struct ras_manager *obj; 1375 1376 obj = get_ras_manager(adev, blk); 1377 if (!obj) 1378 return -EINVAL; 1379 1380 amdgpu_aca_remove_handle(&obj->aca_handle); 1381 1382 return 0; 1383 } 1384 1385 static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk, 1386 enum aca_error_type type, struct ras_err_data *err_data, 1387 struct ras_query_context *qctx) 1388 { 1389 struct ras_manager *obj; 1390 1391 obj = get_ras_manager(adev, blk); 1392 if (!obj) 1393 return -EINVAL; 1394 1395 return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx); 1396 } 1397 1398 ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr, 1399 struct aca_handle *handle, char *buf, void *data) 1400 { 1401 struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle); 1402 struct ras_query_if info = { 1403 .head = obj->head, 1404 }; 1405 1406 if (!amdgpu_ras_get_error_query_ready(obj->adev)) 1407 return sysfs_emit(buf, "Query currently inaccessible\n"); 1408 1409 if (amdgpu_ras_query_error_status(obj->adev, &info)) 1410 return -EINVAL; 1411 1412 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count, 1413 "ce", info.ce_count, "de", info.de_count); 1414 } 1415 1416 static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, 1417 struct ras_query_if *info, 1418 struct ras_err_data *err_data, 1419 struct ras_query_context *qctx, 1420 unsigned int error_query_mode) 1421 { 1422 enum amdgpu_ras_block blk = info ? 
info->head.block : AMDGPU_RAS_BLOCK_COUNT; 1423 struct amdgpu_ras_block_object *block_obj = NULL; 1424 int ret; 1425 1426 if (blk == AMDGPU_RAS_BLOCK_COUNT) 1427 return -EINVAL; 1428 1429 if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY) 1430 return -EINVAL; 1431 1432 if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) { 1433 return amdgpu_virt_req_ras_err_count(adev, blk, err_data); 1434 } else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { 1435 if (info->head.block == AMDGPU_RAS_BLOCK__UMC) { 1436 amdgpu_ras_get_ecc_info(adev, err_data); 1437 } else { 1438 block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); 1439 if (!block_obj || !block_obj->hw_ops) { 1440 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", 1441 get_ras_block_str(&info->head)); 1442 return -EINVAL; 1443 } 1444 1445 if (block_obj->hw_ops->query_ras_error_count) 1446 block_obj->hw_ops->query_ras_error_count(adev, err_data); 1447 1448 if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) || 1449 (info->head.block == AMDGPU_RAS_BLOCK__GFX) || 1450 (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) { 1451 if (block_obj->hw_ops->query_ras_error_status) 1452 block_obj->hw_ops->query_ras_error_status(adev); 1453 } 1454 } 1455 } else { 1456 if (amdgpu_aca_is_enabled(adev)) { 1457 ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx); 1458 if (ret) 1459 return ret; 1460 1461 ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx); 1462 if (ret) 1463 return ret; 1464 1465 ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx); 1466 if (ret) 1467 return ret; 1468 } else { 1469 /* FIXME: add code to check return value later */ 1470 amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx); 1471 amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx); 1472 } 1473 } 1474 1475 return 0; 1476 } 1477 1478 /* query/inject/cure begin */ 1479 static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev, 1480 struct ras_query_if *info, 1481 enum ras_event_type type) 1482 { 1483 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 1484 struct ras_err_data err_data; 1485 struct ras_query_context qctx; 1486 unsigned int error_query_mode; 1487 int ret; 1488 1489 if (!obj) 1490 return -EINVAL; 1491 1492 ret = amdgpu_ras_error_data_init(&err_data); 1493 if (ret) 1494 return ret; 1495 1496 if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) 1497 return -EINVAL; 1498 1499 memset(&qctx, 0, sizeof(qctx)); 1500 qctx.evid.type = type; 1501 qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type); 1502 1503 if (!down_read_trylock(&adev->reset_domain->sem)) { 1504 ret = -EIO; 1505 goto out_fini_err_data; 1506 } 1507 1508 ret = amdgpu_ras_query_error_status_helper(adev, info, 1509 &err_data, 1510 &qctx, 1511 error_query_mode); 1512 up_read(&adev->reset_domain->sem); 1513 if (ret) 1514 goto out_fini_err_data; 1515 1516 if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) { 1517 amdgpu_rasmgr_error_data_statistic_update(obj, &err_data); 1518 amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx); 1519 } else { 1520 /* Host provides absolute error counts. First generate the report 1521 * using the previous VF internal count against new host count. 1522 * Then Update VF internal count. 
1523 */ 1524 amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx); 1525 amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data); 1526 } 1527 1528 info->ue_count = obj->err_data.ue_count; 1529 info->ce_count = obj->err_data.ce_count; 1530 info->de_count = obj->err_data.de_count; 1531 1532 out_fini_err_data: 1533 amdgpu_ras_error_data_fini(&err_data); 1534 1535 return ret; 1536 } 1537 1538 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) 1539 { 1540 return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID); 1541 } 1542 1543 int amdgpu_ras_reset_error_count(struct amdgpu_device *adev, 1544 enum amdgpu_ras_block block) 1545 { 1546 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0); 1547 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; 1548 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs; 1549 1550 if (!block_obj || !block_obj->hw_ops) { 1551 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", 1552 ras_block_str(block)); 1553 return -EOPNOTSUPP; 1554 } 1555 1556 if (!amdgpu_ras_is_supported(adev, block) || 1557 !amdgpu_ras_get_aca_debug_mode(adev)) 1558 return -EOPNOTSUPP; 1559 1560 if (amdgpu_sriov_vf(adev)) 1561 return -EOPNOTSUPP; 1562 1563 /* skip ras error reset in gpu reset */ 1564 if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) && 1565 ((smu_funcs && smu_funcs->set_debug_mode) || 1566 (mca_funcs && mca_funcs->mca_set_debug_mode))) 1567 return -EOPNOTSUPP; 1568 1569 if (block_obj->hw_ops->reset_ras_error_count) 1570 block_obj->hw_ops->reset_ras_error_count(adev); 1571 1572 return 0; 1573 } 1574 1575 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, 1576 enum amdgpu_ras_block block) 1577 { 1578 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0); 1579 1580 if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP) 1581 return 0; 1582 1583 if ((block == AMDGPU_RAS_BLOCK__GFX) || 1584 (block == AMDGPU_RAS_BLOCK__MMHUB)) { 1585 if (block_obj->hw_ops->reset_ras_error_status) 1586 block_obj->hw_ops->reset_ras_error_status(adev); 1587 } 1588 1589 return 0; 1590 } 1591 1592 /* wrapper of psp_ras_trigger_error */ 1593 int amdgpu_ras_error_inject(struct amdgpu_device *adev, 1594 struct ras_inject_if *info) 1595 { 1596 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 1597 struct ta_ras_trigger_error_input block_info = { 1598 .block_id = amdgpu_ras_block_to_ta(info->head.block), 1599 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type), 1600 .sub_block_index = info->head.sub_block_index, 1601 .address = info->address, 1602 .value = info->value, 1603 }; 1604 int ret = -EINVAL; 1605 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, 1606 info->head.block, 1607 info->head.sub_block_index); 1608 1609 /* inject on guest isn't allowed, return success directly */ 1610 if (amdgpu_sriov_vf(adev)) 1611 return 0; 1612 1613 if (!obj) 1614 return -EINVAL; 1615 1616 if (!block_obj || !block_obj->hw_ops) { 1617 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", 1618 get_ras_block_str(&info->head)); 1619 return -EINVAL; 1620 } 1621 1622 /* Calculate XGMI relative offset */ 1623 if (adev->gmc.xgmi.num_physical_nodes > 1 && 1624 info->head.block != AMDGPU_RAS_BLOCK__GFX) { 1625 block_info.address = 1626 amdgpu_xgmi_get_relative_phy_addr(adev, 1627 block_info.address); 1628 } 1629 1630 if (block_obj->hw_ops->ras_error_inject) { 1631 if 
(info->head.block == AMDGPU_RAS_BLOCK__GFX)
			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
		else /* Special ras_error_inject is defined (e.g: xgmi) */
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
						info->instance_mask);
	} else {
		/* default path */
		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);

	return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 on query success or when there is nothing to do; otherwise
 * return an error on failure.
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	if (!query_info)
		/* do nothing if query_info is not specified */
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	*ce_count += query_info->ce_count;
	*ue_count += query_info->ue_count;

	/* some hardware/IP supports read to clear
	 * no need to explicitly reset the err status after the query call */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
	}

	return 0;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for
 * specific ip block; if info is NULL, then the query request is for
 * all the ip blocks that support query ras error counters/status
 *
 * If set, @ce_count or @ue_count, count and return the corresponding
 * error counts in those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
				 unsigned long *ce_count,
				 unsigned long *ue_count,
				 struct ras_query_if *query_info)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	unsigned long ce, ue;
	int ret;

	if (!adev->ras_enabled || !con)
		return -EOPNOTSUPP;

	/* Don't count since no reporting.
	 */
	if (!ce_count && !ue_count)
		return 0;

	ce = 0;
	ue = 0;
	if (!query_info) {
		/* query all the ip blocks that support ras query interface */
		list_for_each_entry(obj, &con->head, node) {
			struct ras_query_if info = {
				.head = obj->head,
			};

			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
		}
	} else {
		/* query specific ip block */
		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
	}

	if (ret)
		return ret;

	if (ce_count)
		*ce_count = ce;

	if (ue_count)
		*ue_count = ue;

	return 0;
}
/* query/inject/cure end */


/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and cannot be used.
 *
 * P: pending for reserve, this gpu page is marked as bad, and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve. this gpu page can't be reserved due to some reasons.
 *
 * Examples:
 *
 * ..
code-block:: bash 1792 * 1793 * 0x00000001 : 0x00001000 : R 1794 * 0x00000002 : 0x00001000 : P 1795 * 1796 */ 1797 1798 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, 1799 struct kobject *kobj, const struct bin_attribute *attr, 1800 char *buf, loff_t ppos, size_t count) 1801 { 1802 struct amdgpu_ras *con = 1803 container_of(attr, struct amdgpu_ras, badpages_attr); 1804 struct amdgpu_device *adev = con->adev; 1805 const unsigned int element_size = 1806 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1; 1807 unsigned int start = div64_ul(ppos + element_size - 1, element_size); 1808 unsigned int end = div64_ul(ppos + count - 1, element_size); 1809 ssize_t s = 0; 1810 struct ras_badpage *bps = NULL; 1811 unsigned int bps_count = 0; 1812 1813 memset(buf, 0, count); 1814 1815 if (amdgpu_ras_badpages_read(adev, &bps, &bps_count)) 1816 return 0; 1817 1818 for (; start < end && start < bps_count; start++) 1819 s += scnprintf(&buf[s], element_size + 1, 1820 "0x%08x : 0x%08x : %1s\n", 1821 bps[start].bp, 1822 bps[start].size, 1823 amdgpu_ras_badpage_flags_str(bps[start].flags)); 1824 1825 kfree(bps); 1826 1827 return s; 1828 } 1829 1830 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, 1831 struct device_attribute *attr, char *buf) 1832 { 1833 struct amdgpu_ras *con = 1834 container_of(attr, struct amdgpu_ras, features_attr); 1835 1836 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); 1837 } 1838 1839 static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev, 1840 struct device_attribute *attr, char *buf) 1841 { 1842 struct amdgpu_ras *con = 1843 container_of(attr, struct amdgpu_ras, version_attr); 1844 return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version); 1845 } 1846 1847 static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev, 1848 struct device_attribute *attr, char *buf) 1849 { 1850 struct amdgpu_ras *con = 1851 container_of(attr, struct amdgpu_ras, schema_attr); 1852 return sysfs_emit(buf, "schema: 0x%x\n", con->schema); 1853 } 1854 1855 static struct { 1856 enum ras_event_type type; 1857 const char *name; 1858 } dump_event[] = { 1859 {RAS_EVENT_TYPE_FATAL, "Fatal Error"}, 1860 {RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"}, 1861 {RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"}, 1862 }; 1863 1864 static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev, 1865 struct device_attribute *attr, char *buf) 1866 { 1867 struct amdgpu_ras *con = 1868 container_of(attr, struct amdgpu_ras, event_state_attr); 1869 struct ras_event_manager *event_mgr = con->event_mgr; 1870 struct ras_event_state *event_state; 1871 int i, size = 0; 1872 1873 if (!event_mgr) 1874 return -EINVAL; 1875 1876 size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno)); 1877 for (i = 0; i < ARRAY_SIZE(dump_event); i++) { 1878 event_state = &event_mgr->event_state[dump_event[i].type]; 1879 size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n", 1880 dump_event[i].name, 1881 atomic64_read(&event_state->count), 1882 event_state->last_seqno); 1883 } 1884 1885 return (ssize_t)size; 1886 } 1887 1888 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) 1889 { 1890 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1891 1892 if (adev->dev->kobj.sd) 1893 sysfs_remove_file_from_group(&adev->dev->kobj, 1894 &con->badpages_attr.attr, 1895 RAS_FS_NAME); 1896 } 1897 1898 static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev) 1899 { 1900 struct 
amdgpu_ras *con = amdgpu_ras_get_context(adev); 1901 struct attribute *attrs[] = { 1902 &con->features_attr.attr, 1903 &con->version_attr.attr, 1904 &con->schema_attr.attr, 1905 &con->event_state_attr.attr, 1906 NULL 1907 }; 1908 struct attribute_group group = { 1909 .name = RAS_FS_NAME, 1910 .attrs = attrs, 1911 }; 1912 1913 if (adev->dev->kobj.sd) 1914 sysfs_remove_group(&adev->dev->kobj, &group); 1915 1916 return 0; 1917 } 1918 1919 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, 1920 struct ras_common_if *head) 1921 { 1922 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 1923 1924 if (amdgpu_aca_is_enabled(adev)) 1925 return 0; 1926 1927 if (!obj || obj->attr_inuse) 1928 return -EINVAL; 1929 1930 if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block)) 1931 return 0; 1932 1933 get_obj(obj); 1934 1935 snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name), 1936 "%s_err_count", head->name); 1937 1938 obj->sysfs_attr = (struct device_attribute){ 1939 .attr = { 1940 .name = obj->fs_data.sysfs_name, 1941 .mode = S_IRUGO, 1942 }, 1943 .show = amdgpu_ras_sysfs_read, 1944 }; 1945 sysfs_attr_init(&obj->sysfs_attr.attr); 1946 1947 if (sysfs_add_file_to_group(&adev->dev->kobj, 1948 &obj->sysfs_attr.attr, 1949 RAS_FS_NAME)) { 1950 put_obj(obj); 1951 return -EINVAL; 1952 } 1953 1954 obj->attr_inuse = 1; 1955 1956 return 0; 1957 } 1958 1959 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, 1960 struct ras_common_if *head) 1961 { 1962 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 1963 1964 if (amdgpu_aca_is_enabled(adev)) 1965 return 0; 1966 1967 if (!obj || !obj->attr_inuse) 1968 return -EINVAL; 1969 1970 if (adev->dev->kobj.sd) 1971 sysfs_remove_file_from_group(&adev->dev->kobj, 1972 &obj->sysfs_attr.attr, 1973 RAS_FS_NAME); 1974 obj->attr_inuse = 0; 1975 put_obj(obj); 1976 1977 return 0; 1978 } 1979 1980 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) 1981 { 1982 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1983 struct ras_manager *obj, *tmp; 1984 1985 list_for_each_entry_safe(obj, tmp, &con->head, node) { 1986 amdgpu_ras_sysfs_remove(adev, &obj->head); 1987 } 1988 1989 if (amdgpu_bad_page_threshold != 0) 1990 amdgpu_ras_sysfs_remove_bad_page_node(adev); 1991 1992 amdgpu_ras_sysfs_remove_dev_attr_node(adev); 1993 1994 return 0; 1995 } 1996 /* sysfs end */ 1997 1998 /** 1999 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors 2000 * 2001 * Normally when there is an uncorrectable error, the driver will reset 2002 * the GPU to recover. However, in the event of an unrecoverable error, 2003 * the driver provides an interface to reboot the system automatically 2004 * in that event. 2005 * 2006 * The following file in debugfs provides that interface: 2007 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot 2008 * 2009 * Usage: 2010 * 2011 * .. 
code-block:: bash 2012 * 2013 * echo true > .../ras/auto_reboot 2014 * 2015 */ 2016 /* debugfs begin */ 2017 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) 2018 { 2019 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2020 struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control; 2021 struct drm_minor *minor = adev_to_drm(adev)->primary; 2022 struct dentry *dir; 2023 2024 dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root); 2025 debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev, 2026 &amdgpu_ras_debugfs_ctrl_ops); 2027 debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev, 2028 &amdgpu_ras_debugfs_eeprom_ops); 2029 debugfs_create_u32("bad_page_cnt_threshold", 0444, dir, 2030 &con->bad_page_cnt_threshold); 2031 debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs); 2032 debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled); 2033 debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled); 2034 debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev, 2035 &amdgpu_ras_debugfs_eeprom_size_ops); 2036 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table", 2037 S_IRUGO, dir, adev, 2038 &amdgpu_ras_debugfs_eeprom_table_ops); 2039 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); 2040 2041 /* 2042 * After an uncorrectable error occurs, GPU recovery will usually 2043 * be scheduled. But due to the known problem of GPU recovery failing 2044 * to bring the GPU back, the interface below gives the user a direct way 2045 * to reboot the system automatically in such a case when an 2046 * ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU recovery routine 2047 * will never be called. 2048 */ 2049 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); 2050 2051 /* 2052 * The user can set this to skip clearing the hardware error count registers 2053 * of RAS IPs during ras recovery.
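* For example (the DRI minor index may differ):
*   echo 1 > /sys/kernel/debug/dri/0/ras/disable_ras_err_cnt_harvest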
2054 */ 2055 debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir, 2056 &con->disable_ras_err_cnt_harvest); 2057 return dir; 2058 } 2059 2060 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, 2061 struct ras_fs_if *head, 2062 struct dentry *dir) 2063 { 2064 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); 2065 2066 if (!obj || !dir) 2067 return; 2068 2069 get_obj(obj); 2070 2071 memcpy(obj->fs_data.debugfs_name, 2072 head->debugfs_name, 2073 sizeof(obj->fs_data.debugfs_name)); 2074 2075 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir, 2076 obj, &amdgpu_ras_debugfs_ops); 2077 } 2078 2079 static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev) 2080 { 2081 bool ret; 2082 2083 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 2084 case IP_VERSION(13, 0, 6): 2085 case IP_VERSION(13, 0, 12): 2086 case IP_VERSION(13, 0, 14): 2087 ret = true; 2088 break; 2089 default: 2090 ret = false; 2091 break; 2092 } 2093 2094 return ret; 2095 } 2096 2097 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) 2098 { 2099 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2100 struct dentry *dir; 2101 struct ras_manager *obj; 2102 struct ras_fs_if fs_info; 2103 2104 /* 2105 * it won't be called in resume path, no need to check 2106 * suspend and gpu reset status 2107 */ 2108 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) 2109 return; 2110 2111 dir = amdgpu_ras_debugfs_create_ctrl_node(adev); 2112 2113 list_for_each_entry(obj, &con->head, node) { 2114 if (amdgpu_ras_is_supported(adev, obj->head.block) && 2115 (obj->attr_inuse == 1)) { 2116 sprintf(fs_info.debugfs_name, "%s_err_inject", 2117 get_ras_block_str(&obj->head)); 2118 fs_info.head = obj->head; 2119 amdgpu_ras_debugfs_create(adev, &fs_info, dir); 2120 } 2121 } 2122 2123 if (amdgpu_ras_aca_is_supported(adev)) { 2124 if (amdgpu_aca_is_enabled(adev)) 2125 amdgpu_aca_smu_debugfs_init(adev, dir); 2126 else 2127 amdgpu_mca_smu_debugfs_init(adev, dir); 2128 } 2129 } 2130 2131 /* debugfs end */ 2132 2133 /* ras fs */ 2134 static const BIN_ATTR(gpu_vram_bad_pages, S_IRUGO, 2135 amdgpu_ras_sysfs_badpages_read, NULL, 0); 2136 static DEVICE_ATTR(features, S_IRUGO, 2137 amdgpu_ras_sysfs_features_read, NULL); 2138 static DEVICE_ATTR(version, 0444, 2139 amdgpu_ras_sysfs_version_show, NULL); 2140 static DEVICE_ATTR(schema, 0444, 2141 amdgpu_ras_sysfs_schema_show, NULL); 2142 static DEVICE_ATTR(event_state, 0444, 2143 amdgpu_ras_sysfs_event_state_show, NULL); 2144 static int amdgpu_ras_fs_init(struct amdgpu_device *adev) 2145 { 2146 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2147 struct attribute_group group = { 2148 .name = RAS_FS_NAME, 2149 }; 2150 struct attribute *attrs[] = { 2151 &con->features_attr.attr, 2152 &con->version_attr.attr, 2153 &con->schema_attr.attr, 2154 &con->event_state_attr.attr, 2155 NULL 2156 }; 2157 const struct bin_attribute *bin_attrs[] = { 2158 NULL, 2159 NULL, 2160 }; 2161 int r; 2162 2163 group.attrs = attrs; 2164 2165 /* add features entry */ 2166 con->features_attr = dev_attr_features; 2167 sysfs_attr_init(attrs[0]); 2168 2169 /* add version entry */ 2170 con->version_attr = dev_attr_version; 2171 sysfs_attr_init(attrs[1]); 2172 2173 /* add schema entry */ 2174 con->schema_attr = dev_attr_schema; 2175 sysfs_attr_init(attrs[2]); 2176 2177 /* add event_state entry */ 2178 con->event_state_attr = dev_attr_event_state; 2179 sysfs_attr_init(attrs[3]); 2180 2181 if (amdgpu_bad_page_threshold != 0) { 2182 /* add bad_page_features entry */ 2183 
con->badpages_attr = bin_attr_gpu_vram_bad_pages; 2184 sysfs_bin_attr_init(&con->badpages_attr); 2185 bin_attrs[0] = &con->badpages_attr; 2186 group.bin_attrs_new = bin_attrs; 2187 } 2188 2189 r = sysfs_create_group(&adev->dev->kobj, &group); 2190 if (r) 2191 dev_err(adev->dev, "Failed to create RAS sysfs group!"); 2192 2193 return 0; 2194 } 2195 2196 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) 2197 { 2198 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2199 struct ras_manager *con_obj, *ip_obj, *tmp; 2200 2201 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 2202 list_for_each_entry_safe(con_obj, tmp, &con->head, node) { 2203 ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head); 2204 if (ip_obj) 2205 put_obj(ip_obj); 2206 } 2207 } 2208 2209 amdgpu_ras_sysfs_remove_all(adev); 2210 return 0; 2211 } 2212 /* ras fs end */ 2213 2214 /* ih begin */ 2215 2216 /* For the hardware that cannot enable bif ring for both ras_controller_irq 2217 * and ras_err_evnet_athub_irq ih cookies, the driver has to poll status 2218 * register to check whether the interrupt is triggered or not, and properly 2219 * ack the interrupt if it is there 2220 */ 2221 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev) 2222 { 2223 /* Fatal error events are handled on host side */ 2224 if (amdgpu_sriov_vf(adev)) 2225 return; 2226 /* 2227 * If the current interrupt is caused by a non-fatal RAS error, skip 2228 * check for fatal error. For fatal errors, FED status of all devices 2229 * in XGMI hive gets set when the first device gets fatal error 2230 * interrupt. The error gets propagated to other devices as well, so 2231 * make sure to ack the interrupt regardless of FED status. 2232 */ 2233 if (!amdgpu_ras_get_fed_status(adev) && 2234 amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY)) 2235 return; 2236 2237 if (adev->nbio.ras && 2238 adev->nbio.ras->handle_ras_controller_intr_no_bifring) 2239 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev); 2240 2241 if (adev->nbio.ras && 2242 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring) 2243 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev); 2244 } 2245 2246 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj, 2247 struct amdgpu_iv_entry *entry) 2248 { 2249 bool poison_stat = false; 2250 struct amdgpu_device *adev = obj->adev; 2251 struct amdgpu_ras_block_object *block_obj = 2252 amdgpu_ras_get_ras_block(adev, obj->head.block, 0); 2253 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2254 enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION; 2255 u64 event_id; 2256 int ret; 2257 2258 if (!block_obj || !con) 2259 return; 2260 2261 ret = amdgpu_ras_mark_ras_event(adev, type); 2262 if (ret) 2263 return; 2264 2265 amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block); 2266 /* both query_poison_status and handle_poison_consumption are optional, 2267 * but at least one of them should be implemented if we need poison 2268 * consumption handler 2269 */ 2270 if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) { 2271 poison_stat = block_obj->hw_ops->query_poison_status(adev); 2272 if (!poison_stat) { 2273 /* Not poison consumption interrupt, no need to handle it */ 2274 dev_info(adev->dev, "No RAS poison status in %s poison IH.\n", 2275 block_obj->ras_comm.name); 2276 2277 return; 2278 } 2279 } 2280 2281 amdgpu_umc_poison_handler(adev, obj->head.block, 0); 2282 2283 if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption) 2284 poison_stat = 
block_obj->hw_ops->handle_poison_consumption(adev); 2285 2286 /* gpu reset is the fallback for the failed and default cases. 2287 * For the RMA case, amdgpu_umc_poison_handler will handle the gpu reset. 2288 */ 2289 if (poison_stat && !amdgpu_ras_is_rma(adev)) { 2290 event_id = amdgpu_ras_acquire_event_id(adev, type); 2291 RAS_EVENT_LOG(adev, event_id, 2292 "GPU reset for %s RAS poison consumption is issued!\n", 2293 block_obj->ras_comm.name); 2294 amdgpu_ras_reset_gpu(adev); 2295 } 2296 2297 if (!poison_stat) 2298 amdgpu_gfx_poison_consumption_handler(adev, entry); 2299 } 2300 2301 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj, 2302 struct amdgpu_iv_entry *entry) 2303 { 2304 struct amdgpu_device *adev = obj->adev; 2305 enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION; 2306 u64 event_id; 2307 int ret; 2308 2309 ret = amdgpu_ras_mark_ras_event(adev, type); 2310 if (ret) 2311 return; 2312 2313 event_id = amdgpu_ras_acquire_event_id(adev, type); 2314 RAS_EVENT_LOG(adev, event_id, "Poison is created\n"); 2315 2316 if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) { 2317 struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev); 2318 2319 atomic_inc(&con->page_retirement_req_cnt); 2320 atomic_inc(&con->poison_creation_count); 2321 2322 wake_up(&con->page_retirement_wq); 2323 } 2324 } 2325 2326 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj, 2327 struct amdgpu_iv_entry *entry) 2328 { 2329 struct ras_ih_data *data = &obj->ih_data; 2330 struct ras_err_data err_data; 2331 int ret; 2332 2333 if (!data->cb) 2334 return; 2335 2336 ret = amdgpu_ras_error_data_init(&err_data); 2337 if (ret) 2338 return; 2339 2340 /* Let the IP handle its data; we may need to get the output 2341 * from the callback to update the error type/count, etc. 2342 */ 2343 amdgpu_ras_set_fed(obj->adev, true); 2344 ret = data->cb(obj->adev, &err_data, entry); 2345 /* A ue will trigger an interrupt, and in that case 2346 * we need to do a reset to recover the whole system. 2347 * But leave that recovery to the IP; here we just dispatch 2348 * the error.
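* The local err_data is torn down with amdgpu_ras_error_data_fini() below
* whether or not the callback succeeds; only the counts are carried over
* into the persistent obj->err_data on success.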
2349 */ 2350 if (ret == AMDGPU_RAS_SUCCESS) { 2351 /* these counts could be left as 0 if 2352 * some blocks do not count error number 2353 */ 2354 obj->err_data.ue_count += err_data.ue_count; 2355 obj->err_data.ce_count += err_data.ce_count; 2356 obj->err_data.de_count += err_data.de_count; 2357 } 2358 2359 amdgpu_ras_error_data_fini(&err_data); 2360 } 2361 2362 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) 2363 { 2364 struct ras_ih_data *data = &obj->ih_data; 2365 struct amdgpu_iv_entry entry; 2366 2367 while (data->rptr != data->wptr) { 2368 rmb(); 2369 memcpy(&entry, &data->ring[data->rptr], 2370 data->element_size); 2371 2372 wmb(); 2373 data->rptr = (data->aligned_element_size + 2374 data->rptr) % data->ring_size; 2375 2376 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) { 2377 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC) 2378 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry); 2379 else 2380 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry); 2381 } else { 2382 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC) 2383 amdgpu_ras_interrupt_umc_handler(obj, &entry); 2384 else 2385 dev_warn(obj->adev->dev, 2386 "No RAS interrupt handler for non-UMC block with poison disabled.\n"); 2387 } 2388 } 2389 } 2390 2391 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) 2392 { 2393 struct ras_ih_data *data = 2394 container_of(work, struct ras_ih_data, ih_work); 2395 struct ras_manager *obj = 2396 container_of(data, struct ras_manager, ih_data); 2397 2398 amdgpu_ras_interrupt_handler(obj); 2399 } 2400 2401 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, 2402 struct ras_dispatch_if *info) 2403 { 2404 struct ras_manager *obj; 2405 struct ras_ih_data *data; 2406 2407 obj = amdgpu_ras_find_obj(adev, &info->head); 2408 if (!obj) 2409 return -EINVAL; 2410 2411 data = &obj->ih_data; 2412 2413 if (data->inuse == 0) 2414 return 0; 2415 2416 /* Might be overflow... 
*/ 2417 memcpy(&data->ring[data->wptr], info->entry, 2418 data->element_size); 2419 2420 wmb(); 2421 data->wptr = (data->aligned_element_size + 2422 data->wptr) % data->ring_size; 2423 2424 schedule_work(&data->ih_work); 2425 2426 return 0; 2427 } 2428 2429 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, 2430 struct ras_common_if *head) 2431 { 2432 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 2433 struct ras_ih_data *data; 2434 2435 if (!obj) 2436 return -EINVAL; 2437 2438 data = &obj->ih_data; 2439 if (data->inuse == 0) 2440 return 0; 2441 2442 cancel_work_sync(&data->ih_work); 2443 2444 kfree(data->ring); 2445 memset(data, 0, sizeof(*data)); 2446 put_obj(obj); 2447 2448 return 0; 2449 } 2450 2451 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev, 2452 struct ras_common_if *head) 2453 { 2454 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 2455 struct ras_ih_data *data; 2456 struct amdgpu_ras_block_object *ras_obj; 2457 2458 if (!obj) { 2459 /* in case we registe the IH before enable ras feature */ 2460 obj = amdgpu_ras_create_obj(adev, head); 2461 if (!obj) 2462 return -EINVAL; 2463 } else 2464 get_obj(obj); 2465 2466 ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm); 2467 2468 data = &obj->ih_data; 2469 /* add the callback.etc */ 2470 *data = (struct ras_ih_data) { 2471 .inuse = 0, 2472 .cb = ras_obj->ras_cb, 2473 .element_size = sizeof(struct amdgpu_iv_entry), 2474 .rptr = 0, 2475 .wptr = 0, 2476 }; 2477 2478 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler); 2479 2480 data->aligned_element_size = ALIGN(data->element_size, 8); 2481 /* the ring can store 64 iv entries. */ 2482 data->ring_size = 64 * data->aligned_element_size; 2483 data->ring = kmalloc(data->ring_size, GFP_KERNEL); 2484 if (!data->ring) { 2485 put_obj(obj); 2486 return -ENOMEM; 2487 } 2488 2489 /* IH is ready */ 2490 data->inuse = 1; 2491 2492 return 0; 2493 } 2494 2495 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev) 2496 { 2497 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2498 struct ras_manager *obj, *tmp; 2499 2500 list_for_each_entry_safe(obj, tmp, &con->head, node) { 2501 amdgpu_ras_interrupt_remove_handler(adev, &obj->head); 2502 } 2503 2504 return 0; 2505 } 2506 /* ih end */ 2507 2508 /* traversal all IPs except NBIO to query error counter */ 2509 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type) 2510 { 2511 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2512 struct ras_manager *obj; 2513 2514 if (!adev->ras_enabled || !con) 2515 return; 2516 2517 list_for_each_entry(obj, &con->head, node) { 2518 struct ras_query_if info = { 2519 .head = obj->head, 2520 }; 2521 2522 /* 2523 * PCIE_BIF IP has one different isr by ras controller 2524 * interrupt, the specific ras counter query will be 2525 * done in that isr. So skip such block from common 2526 * sync flood interrupt isr calling. 2527 */ 2528 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF) 2529 continue; 2530 2531 /* 2532 * this is a workaround for aldebaran, skip send msg to 2533 * smu to get ecc_info table due to smu handle get ecc 2534 * info table failed temporarily. 2535 * should be removed until smu fix handle ecc_info table. 
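* (aldebaran is matched below as MP1 v13.0.2)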
2536 */ 2537 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) && 2538 (amdgpu_ip_version(adev, MP1_HWIP, 0) == 2539 IP_VERSION(13, 0, 2))) 2540 continue; 2541 2542 amdgpu_ras_query_error_status_with_event(adev, &info, type); 2543 2544 if (amdgpu_ip_version(adev, MP0_HWIP, 0) != 2545 IP_VERSION(11, 0, 2) && 2546 amdgpu_ip_version(adev, MP0_HWIP, 0) != 2547 IP_VERSION(11, 0, 4) && 2548 amdgpu_ip_version(adev, MP0_HWIP, 0) != 2549 IP_VERSION(13, 0, 0)) { 2550 if (amdgpu_ras_reset_error_status(adev, info.head.block)) 2551 dev_warn(adev->dev, "Failed to reset error counter and error status"); 2552 } 2553 } 2554 } 2555 2556 /* Parse RdRspStatus and WrRspStatus */ 2557 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, 2558 struct ras_query_if *info) 2559 { 2560 struct amdgpu_ras_block_object *block_obj; 2561 /* 2562 * Only two block need to query read/write 2563 * RspStatus at current state 2564 */ 2565 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) && 2566 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB)) 2567 return; 2568 2569 block_obj = amdgpu_ras_get_ras_block(adev, 2570 info->head.block, 2571 info->head.sub_block_index); 2572 2573 if (!block_obj || !block_obj->hw_ops) { 2574 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", 2575 get_ras_block_str(&info->head)); 2576 return; 2577 } 2578 2579 if (block_obj->hw_ops->query_ras_error_status) 2580 block_obj->hw_ops->query_ras_error_status(adev); 2581 2582 } 2583 2584 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev) 2585 { 2586 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2587 struct ras_manager *obj; 2588 2589 if (!adev->ras_enabled || !con) 2590 return; 2591 2592 list_for_each_entry(obj, &con->head, node) { 2593 struct ras_query_if info = { 2594 .head = obj->head, 2595 }; 2596 2597 amdgpu_ras_error_status_query(adev, &info); 2598 } 2599 } 2600 2601 /* recovery begin */ 2602 2603 /* return 0 on success. 2604 * caller need free bps. 
2605 */ 2606 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, 2607 struct ras_badpage **bps, unsigned int *count) 2608 { 2609 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2610 struct ras_err_handler_data *data; 2611 int i = 0; 2612 int ret = 0, status; 2613 2614 if (!con || !con->eh_data || !bps || !count) 2615 return -EINVAL; 2616 2617 mutex_lock(&con->recovery_lock); 2618 data = con->eh_data; 2619 if (!data || data->count == 0) { 2620 *bps = NULL; 2621 ret = -EINVAL; 2622 goto out; 2623 } 2624 2625 *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL); 2626 if (!*bps) { 2627 ret = -ENOMEM; 2628 goto out; 2629 } 2630 2631 for (; i < data->count; i++) { 2632 if (!data->bps[i].ts) 2633 continue; 2634 2635 (*bps)[i] = (struct ras_badpage){ 2636 .bp = data->bps[i].retired_page, 2637 .size = AMDGPU_GPU_PAGE_SIZE, 2638 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, 2639 }; 2640 2641 if (amdgpu_ras_check_critical_address(adev, 2642 data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) 2643 continue; 2644 2645 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, 2646 data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT); 2647 if (status == -EBUSY) 2648 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; 2649 else if (status == -ENOENT) 2650 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; 2651 } 2652 2653 *count = con->bad_page_num; 2654 out: 2655 mutex_unlock(&con->recovery_lock); 2656 return ret; 2657 } 2658 2659 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev, 2660 struct amdgpu_hive_info *hive, bool status) 2661 { 2662 struct amdgpu_device *tmp_adev; 2663 2664 if (hive) { 2665 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) 2666 amdgpu_ras_set_fed(tmp_adev, status); 2667 } else { 2668 amdgpu_ras_set_fed(adev, status); 2669 } 2670 } 2671 2672 bool amdgpu_ras_in_recovery(struct amdgpu_device *adev) 2673 { 2674 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 2675 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 2676 int hive_ras_recovery = 0; 2677 2678 if (hive) { 2679 hive_ras_recovery = atomic_read(&hive->ras_recovery); 2680 amdgpu_put_xgmi_hive(hive); 2681 } 2682 2683 if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) 2684 return true; 2685 2686 return false; 2687 } 2688 2689 static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev) 2690 { 2691 if (amdgpu_ras_intr_triggered()) 2692 return RAS_EVENT_TYPE_FATAL; 2693 else 2694 return RAS_EVENT_TYPE_POISON_CONSUMPTION; 2695 } 2696 2697 static void amdgpu_ras_do_recovery(struct work_struct *work) 2698 { 2699 struct amdgpu_ras *ras = 2700 container_of(work, struct amdgpu_ras, recovery_work); 2701 struct amdgpu_device *remote_adev = NULL; 2702 struct amdgpu_device *adev = ras->adev; 2703 struct list_head device_list, *device_list_handle = NULL; 2704 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 2705 enum ras_event_type type; 2706 2707 if (hive) { 2708 atomic_set(&hive->ras_recovery, 1); 2709 2710 /* If any device which is part of the hive received RAS fatal 2711 * error interrupt, set fatal error status on all. This 2712 * condition will need a recovery, and flag will be cleared 2713 * as part of recovery. 
2714 */ 2715 list_for_each_entry(remote_adev, &hive->device_list, 2716 gmc.xgmi.head) 2717 if (amdgpu_ras_get_fed_status(remote_adev)) { 2718 amdgpu_ras_set_fed_all(adev, hive, true); 2719 break; 2720 } 2721 } 2722 if (!ras->disable_ras_err_cnt_harvest) { 2723 2724 /* Build list of devices to query RAS related errors */ 2725 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) { 2726 device_list_handle = &hive->device_list; 2727 } else { 2728 INIT_LIST_HEAD(&device_list); 2729 list_add_tail(&adev->gmc.xgmi.head, &device_list); 2730 device_list_handle = &device_list; 2731 } 2732 2733 type = amdgpu_ras_get_fatal_error_event(adev); 2734 list_for_each_entry(remote_adev, 2735 device_list_handle, gmc.xgmi.head) { 2736 amdgpu_ras_query_err_status(remote_adev); 2737 amdgpu_ras_log_on_err_counter(remote_adev, type); 2738 } 2739 2740 } 2741 2742 if (amdgpu_device_should_recover_gpu(ras->adev)) { 2743 struct amdgpu_reset_context reset_context; 2744 memset(&reset_context, 0, sizeof(reset_context)); 2745 2746 reset_context.method = AMD_RESET_METHOD_NONE; 2747 reset_context.reset_req_dev = adev; 2748 reset_context.src = AMDGPU_RESET_SRC_RAS; 2749 set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); 2750 2751 /* Perform full reset in fatal error mode */ 2752 if (!amdgpu_ras_is_poison_mode_supported(ras->adev)) 2753 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 2754 else { 2755 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 2756 2757 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) { 2758 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET; 2759 reset_context.method = AMD_RESET_METHOD_MODE2; 2760 } 2761 2762 /* Fatal error occurs in poison mode, mode1 reset is used to 2763 * recover gpu. 2764 */ 2765 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) { 2766 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET; 2767 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 2768 2769 psp_fatal_error_recovery_quirk(&adev->psp); 2770 } 2771 } 2772 2773 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context); 2774 } 2775 atomic_set(&ras->in_recovery, 0); 2776 if (hive) { 2777 atomic_set(&hive->ras_recovery, 0); 2778 amdgpu_put_xgmi_hive(hive); 2779 } 2780 } 2781 2782 /* alloc/realloc bps array */ 2783 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, 2784 struct ras_err_handler_data *data, int pages) 2785 { 2786 unsigned int old_space = data->count + data->space_left; 2787 unsigned int new_space = old_space + pages; 2788 unsigned int align_space = ALIGN(new_space, 512); 2789 void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL); 2790 2791 if (!bps) { 2792 return -ENOMEM; 2793 } 2794 2795 if (data->bps) { 2796 memcpy(bps, data->bps, 2797 data->count * sizeof(*data->bps)); 2798 kfree(data->bps); 2799 } 2800 2801 data->bps = bps; 2802 data->space_left += align_space - old_space; 2803 return 0; 2804 } 2805 2806 static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev, 2807 struct eeprom_table_record *bps, 2808 struct ras_err_data *err_data) 2809 { 2810 struct ta_ras_query_address_input addr_in; 2811 uint32_t socket = 0; 2812 int ret = 0; 2813 2814 if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) 2815 socket = adev->smuio.funcs->get_socket_id(adev); 2816 2817 /* reinit err_data */ 2818 err_data->err_addr_cnt = 0; 2819 err_data->err_addr_len = adev->umc.retire_unit; 2820 2821 memset(&addr_in, 0, sizeof(addr_in)); 2822 addr_in.ma.err_addr = bps->address; 2823 addr_in.ma.socket_id = socket; 2824 addr_in.ma.ch_inst 
= bps->mem_channel; 2825 /* tell RAS TA the node instance is not used */ 2826 addr_in.ma.node_inst = TA_RAS_INV_NODE; 2827 2828 if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) 2829 ret = adev->umc.ras->convert_ras_err_addr(adev, err_data, 2830 &addr_in, NULL, false); 2831 2832 return ret; 2833 } 2834 2835 static int amdgpu_ras_mca2pa(struct amdgpu_device *adev, 2836 struct eeprom_table_record *bps, 2837 struct ras_err_data *err_data) 2838 { 2839 struct ta_ras_query_address_input addr_in; 2840 uint32_t die_id, socket = 0; 2841 2842 if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) 2843 socket = adev->smuio.funcs->get_socket_id(adev); 2844 2845 /* although die id is gotten from PA in nps1 mode, the id is 2846 * fitable for any nps mode 2847 */ 2848 if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa) 2849 die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address, 2850 bps->retired_page << AMDGPU_GPU_PAGE_SHIFT); 2851 else 2852 return -EINVAL; 2853 2854 /* reinit err_data */ 2855 err_data->err_addr_cnt = 0; 2856 err_data->err_addr_len = adev->umc.retire_unit; 2857 2858 memset(&addr_in, 0, sizeof(addr_in)); 2859 addr_in.ma.err_addr = bps->address; 2860 addr_in.ma.ch_inst = bps->mem_channel; 2861 addr_in.ma.umc_inst = bps->mcumc_id; 2862 addr_in.ma.node_inst = die_id; 2863 addr_in.ma.socket_id = socket; 2864 2865 if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) 2866 return adev->umc.ras->convert_ras_err_addr(adev, err_data, 2867 &addr_in, NULL, false); 2868 else 2869 return -EINVAL; 2870 } 2871 2872 static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev, 2873 struct eeprom_table_record *bps, int count) 2874 { 2875 int j; 2876 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2877 struct ras_err_handler_data *data = con->eh_data; 2878 2879 for (j = 0; j < count; j++) { 2880 if (amdgpu_ras_check_bad_page_unlock(con, 2881 bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) { 2882 data->count++; 2883 data->space_left--; 2884 continue; 2885 } 2886 2887 if (!data->space_left && 2888 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) { 2889 return -ENOMEM; 2890 } 2891 2892 amdgpu_ras_reserve_page(adev, bps[j].retired_page); 2893 2894 memcpy(&data->bps[data->count], &(bps[j]), 2895 sizeof(struct eeprom_table_record)); 2896 data->count++; 2897 data->space_left--; 2898 con->bad_page_num++; 2899 } 2900 2901 return 0; 2902 } 2903 2904 static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev, 2905 struct eeprom_table_record *bps, struct ras_err_data *err_data, 2906 enum amdgpu_memory_partition nps) 2907 { 2908 int i = 0; 2909 enum amdgpu_memory_partition save_nps; 2910 2911 save_nps = (bps[0].retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK; 2912 2913 /*old asics just have pa in eeprom*/ 2914 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) { 2915 memcpy(err_data->err_addr, bps, 2916 sizeof(struct eeprom_table_record) * adev->umc.retire_unit); 2917 goto out; 2918 } 2919 2920 for (i = 0; i < adev->umc.retire_unit; i++) 2921 bps[i].retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT); 2922 2923 if (save_nps) { 2924 if (save_nps == nps) { 2925 if (amdgpu_umc_pages_in_a_row(adev, err_data, 2926 bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT)) 2927 return -EINVAL; 2928 for (i = 0; i < adev->umc.retire_unit; i++) { 2929 err_data->err_addr[i].address = bps[0].address; 2930 err_data->err_addr[i].mem_channel = bps[0].mem_channel; 2931 err_data->err_addr[i].bank = bps[0].bank; 2932 err_data->err_addr[i].err_type = bps[0].err_type; 2933 
err_data->err_addr[i].mcumc_id = bps[0].mcumc_id; 2934 } 2935 } else { 2936 if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data)) 2937 return -EINVAL; 2938 } 2939 } else { 2940 if (bps[0].address == 0) { 2941 /* for specific old eeprom data, mca address is not stored, 2942 * calc it from pa 2943 */ 2944 if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT, 2945 &(bps[0].address), AMDGPU_NPS1_PARTITION_MODE)) 2946 return -EINVAL; 2947 } 2948 2949 if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) { 2950 if (nps == AMDGPU_NPS1_PARTITION_MODE) 2951 memcpy(err_data->err_addr, bps, 2952 sizeof(struct eeprom_table_record) * adev->umc.retire_unit); 2953 else 2954 return -EOPNOTSUPP; 2955 } 2956 } 2957 2958 out: 2959 return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit); 2960 } 2961 2962 static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev, 2963 struct eeprom_table_record *bps, struct ras_err_data *err_data, 2964 enum amdgpu_memory_partition nps) 2965 { 2966 int i = 0; 2967 enum amdgpu_memory_partition save_nps; 2968 2969 save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK; 2970 bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT); 2971 2972 if (save_nps == nps) { 2973 if (amdgpu_umc_pages_in_a_row(adev, err_data, 2974 bps->retired_page << AMDGPU_GPU_PAGE_SHIFT)) 2975 return -EINVAL; 2976 for (i = 0; i < adev->umc.retire_unit; i++) { 2977 err_data->err_addr[i].address = bps->address; 2978 err_data->err_addr[i].mem_channel = bps->mem_channel; 2979 err_data->err_addr[i].bank = bps->bank; 2980 err_data->err_addr[i].err_type = bps->err_type; 2981 err_data->err_addr[i].mcumc_id = bps->mcumc_id; 2982 } 2983 } else { 2984 if (bps->address) { 2985 if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data)) 2986 return -EINVAL; 2987 } else { 2988 /* for specific old eeprom data, mca address is not stored, 2989 * calc it from pa 2990 */ 2991 if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT, 2992 &(bps->address), AMDGPU_NPS1_PARTITION_MODE)) 2993 return -EINVAL; 2994 2995 if (amdgpu_ras_mca2pa(adev, bps, err_data)) 2996 return -EOPNOTSUPP; 2997 } 2998 } 2999 3000 return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, 3001 adev->umc.retire_unit); 3002 } 3003 3004 /* it deal with vram only. 
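* records whose page is already tracked in eh_data are not reserved again,
* and when from_rom is true the EEPROM records are first converted for the
* current memory partition (NPS) mode.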
*/ 3005 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, 3006 struct eeprom_table_record *bps, int pages, bool from_rom) 3007 { 3008 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3009 struct ras_err_data err_data; 3010 struct amdgpu_ras_eeprom_control *control = 3011 &adev->psp.ras_context.ras->eeprom_control; 3012 enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE; 3013 int ret = 0; 3014 uint32_t i = 0; 3015 3016 if (!con || !con->eh_data || !bps || pages <= 0) 3017 return 0; 3018 3019 if (from_rom) { 3020 err_data.err_addr = 3021 kcalloc(adev->umc.retire_unit, 3022 sizeof(struct eeprom_table_record), GFP_KERNEL); 3023 if (!err_data.err_addr) { 3024 dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n"); 3025 return -ENOMEM; 3026 } 3027 3028 if (adev->gmc.gmc_funcs->query_mem_partition_mode) 3029 nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); 3030 } 3031 3032 mutex_lock(&con->recovery_lock); 3033 3034 if (from_rom) { 3035 /* there is no pa recs in V3, so skip pa recs processing */ 3036 if (control->tbl_hdr.version < RAS_TABLE_VER_V3) { 3037 for (i = 0; i < pages; i++) { 3038 if (control->ras_num_recs - i >= adev->umc.retire_unit) { 3039 if ((bps[i].address == bps[i + 1].address) && 3040 (bps[i].mem_channel == bps[i + 1].mem_channel)) { 3041 /* deal with retire_unit records a time */ 3042 ret = __amdgpu_ras_convert_rec_array_from_rom(adev, 3043 &bps[i], &err_data, nps); 3044 if (ret) 3045 con->bad_page_num -= adev->umc.retire_unit; 3046 i += (adev->umc.retire_unit - 1); 3047 } else { 3048 break; 3049 } 3050 } else { 3051 break; 3052 } 3053 } 3054 } 3055 for (; i < pages; i++) { 3056 ret = __amdgpu_ras_convert_rec_from_rom(adev, 3057 &bps[i], &err_data, nps); 3058 if (ret) 3059 con->bad_page_num -= adev->umc.retire_unit; 3060 } 3061 3062 con->eh_data->count_saved = con->eh_data->count; 3063 } else { 3064 ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages); 3065 } 3066 3067 if (from_rom) 3068 kfree(err_data.err_addr); 3069 mutex_unlock(&con->recovery_lock); 3070 3071 return ret; 3072 } 3073 3074 /* 3075 * write error record array to eeprom, the function should be 3076 * protected by recovery_lock 3077 * new_cnt: new added UE count, excluding reserved bad pages, can be NULL 3078 */ 3079 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev, 3080 unsigned long *new_cnt) 3081 { 3082 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3083 struct ras_err_handler_data *data; 3084 struct amdgpu_ras_eeprom_control *control; 3085 int save_count, unit_num, i; 3086 3087 if (!con || !con->eh_data) { 3088 if (new_cnt) 3089 *new_cnt = 0; 3090 3091 return 0; 3092 } 3093 3094 if (!con->eeprom_control.is_eeprom_valid) { 3095 dev_warn(adev->dev, 3096 "Failed to save EEPROM table data because of EEPROM data corruption!"); 3097 if (new_cnt) 3098 *new_cnt = 0; 3099 3100 return 0; 3101 } 3102 3103 mutex_lock(&con->recovery_lock); 3104 control = &con->eeprom_control; 3105 data = con->eh_data; 3106 unit_num = data->count / adev->umc.retire_unit - control->ras_num_recs; 3107 save_count = con->bad_page_num - control->ras_num_bad_pages; 3108 mutex_unlock(&con->recovery_lock); 3109 3110 if (new_cnt) 3111 *new_cnt = unit_num; 3112 3113 /* only new entries are saved */ 3114 if (unit_num > 0) { 3115 /*old asics only save pa to eeprom like before*/ 3116 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) { 3117 if (amdgpu_ras_eeprom_append(control, 3118 &data->bps[data->count_saved], unit_num)) { 3119 dev_err(adev->dev, "Failed to 
save EEPROM table data!"); 3120 return -EIO; 3121 } 3122 } else { 3123 for (i = 0; i < unit_num; i++) { 3124 if (amdgpu_ras_eeprom_append(control, 3125 &data->bps[data->count_saved + 3126 i * adev->umc.retire_unit], 1)) { 3127 dev_err(adev->dev, "Failed to save EEPROM table data!"); 3128 return -EIO; 3129 } 3130 } 3131 } 3132 3133 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count); 3134 data->count_saved = data->count; 3135 } 3136 3137 return 0; 3138 } 3139 3140 /* 3141 * read error record array in eeprom and reserve enough space for 3142 * storing new bad pages 3143 */ 3144 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) 3145 { 3146 struct amdgpu_ras_eeprom_control *control = 3147 &adev->psp.ras_context.ras->eeprom_control; 3148 struct eeprom_table_record *bps; 3149 int ret, i = 0; 3150 3151 /* no bad page record, skip eeprom access */ 3152 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0) 3153 return 0; 3154 3155 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL); 3156 if (!bps) 3157 return -ENOMEM; 3158 3159 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs); 3160 if (ret) { 3161 dev_err(adev->dev, "Failed to load EEPROM table records!"); 3162 } else { 3163 if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) { 3164 /*In V3, there is no pa recs, and some cases(when address==0) may be parsed 3165 as pa recs, so add verion check to avoid it. 3166 */ 3167 if (control->tbl_hdr.version < RAS_TABLE_VER_V3) { 3168 for (i = 0; i < control->ras_num_recs; i++) { 3169 if ((control->ras_num_recs - i) >= adev->umc.retire_unit) { 3170 if ((bps[i].address == bps[i + 1].address) && 3171 (bps[i].mem_channel == bps[i + 1].mem_channel)) { 3172 control->ras_num_pa_recs += adev->umc.retire_unit; 3173 i += (adev->umc.retire_unit - 1); 3174 } else { 3175 control->ras_num_mca_recs += 3176 (control->ras_num_recs - i); 3177 break; 3178 } 3179 } else { 3180 control->ras_num_mca_recs += (control->ras_num_recs - i); 3181 break; 3182 } 3183 } 3184 } else { 3185 control->ras_num_mca_recs = control->ras_num_recs; 3186 } 3187 } 3188 3189 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true); 3190 if (ret) 3191 goto out; 3192 3193 ret = amdgpu_ras_eeprom_check(control); 3194 if (ret) 3195 goto out; 3196 3197 /* HW not usable */ 3198 if (amdgpu_ras_is_rma(adev)) 3199 ret = -EHWPOISON; 3200 } 3201 3202 out: 3203 kfree(bps); 3204 return ret; 3205 } 3206 3207 static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, 3208 uint64_t addr) 3209 { 3210 struct ras_err_handler_data *data = con->eh_data; 3211 struct amdgpu_device *adev = con->adev; 3212 int i; 3213 3214 if ((addr >= adev->gmc.mc_vram_size && 3215 adev->gmc.mc_vram_size) || 3216 (addr >= RAS_UMC_INJECT_ADDR_LIMIT)) 3217 return -EINVAL; 3218 3219 addr >>= AMDGPU_GPU_PAGE_SHIFT; 3220 for (i = 0; i < data->count; i++) 3221 if (addr == data->bps[i].retired_page) 3222 return 1; 3223 3224 return 0; 3225 } 3226 3227 /* 3228 * check if an address belongs to bad page 3229 * 3230 * Note: this check is only for umc block 3231 */ 3232 static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev, 3233 uint64_t addr) 3234 { 3235 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3236 int ret = 0; 3237 3238 if (!con || !con->eh_data) 3239 return ret; 3240 3241 mutex_lock(&con->recovery_lock); 3242 ret = amdgpu_ras_check_bad_page_unlock(con, addr); 3243 mutex_unlock(&con->recovery_lock); 3244 return ret; 3245 } 3246 3247 static void 
amdgpu_ras_validate_threshold(struct amdgpu_device *adev, 3248 uint32_t max_count) 3249 { 3250 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3251 3252 /* 3253 * amdgpu_bad_page_threshold is used to config 3254 * the threshold for the number of bad pages. 3255 * -1: Threshold is set to default value 3256 * Driver will issue a warning message when threshold is reached 3257 * and continue runtime services. 3258 * 0: Disable bad page retirement 3259 * Driver will not retire bad pages 3260 * which is intended for debugging purpose. 3261 * -2: Threshold is determined by a formula 3262 * that assumes 1 bad page per 100M of local memory. 3263 * Driver will continue runtime services when threhold is reached. 3264 * 0 < threshold < max number of bad page records in EEPROM, 3265 * A user-defined threshold is set 3266 * Driver will halt runtime services when this custom threshold is reached. 3267 */ 3268 if (amdgpu_bad_page_threshold == -2) { 3269 u64 val = adev->gmc.mc_vram_size; 3270 3271 do_div(val, RAS_BAD_PAGE_COVER); 3272 con->bad_page_cnt_threshold = min(lower_32_bits(val), 3273 max_count); 3274 } else if (amdgpu_bad_page_threshold == -1) { 3275 con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4; 3276 } else { 3277 con->bad_page_cnt_threshold = min_t(int, max_count, 3278 amdgpu_bad_page_threshold); 3279 } 3280 } 3281 3282 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev, 3283 enum amdgpu_ras_block block, uint16_t pasid, 3284 pasid_notify pasid_fn, void *data, uint32_t reset) 3285 { 3286 int ret = 0; 3287 struct ras_poison_msg poison_msg; 3288 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3289 3290 memset(&poison_msg, 0, sizeof(poison_msg)); 3291 poison_msg.block = block; 3292 poison_msg.pasid = pasid; 3293 poison_msg.reset = reset; 3294 poison_msg.pasid_fn = pasid_fn; 3295 poison_msg.data = data; 3296 3297 ret = kfifo_put(&con->poison_fifo, poison_msg); 3298 if (!ret) { 3299 dev_err(adev->dev, "Poison message fifo is full!\n"); 3300 return -ENOSPC; 3301 } 3302 3303 return 0; 3304 } 3305 3306 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev, 3307 struct ras_poison_msg *poison_msg) 3308 { 3309 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3310 3311 return kfifo_get(&con->poison_fifo, poison_msg); 3312 } 3313 3314 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log) 3315 { 3316 mutex_init(&ecc_log->lock); 3317 3318 INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL); 3319 ecc_log->de_queried_count = 0; 3320 ecc_log->prev_de_queried_count = 0; 3321 } 3322 3323 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log) 3324 { 3325 struct radix_tree_iter iter; 3326 void __rcu **slot; 3327 struct ras_ecc_err *ecc_err; 3328 3329 mutex_lock(&ecc_log->lock); 3330 radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) { 3331 ecc_err = radix_tree_deref_slot(slot); 3332 kfree(ecc_err->err_pages.pfn); 3333 kfree(ecc_err); 3334 radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot); 3335 } 3336 mutex_unlock(&ecc_log->lock); 3337 3338 mutex_destroy(&ecc_log->lock); 3339 ecc_log->de_queried_count = 0; 3340 ecc_log->prev_de_queried_count = 0; 3341 } 3342 3343 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con, 3344 uint32_t delayed_ms) 3345 { 3346 int ret; 3347 3348 mutex_lock(&con->umc_ecc_log.lock); 3349 ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree, 3350 UMC_ECC_NEW_DETECTED_TAG); 3351 mutex_unlock(&con->umc_ecc_log.lock); 3352 3353 if (ret) 3354 
schedule_delayed_work(&con->page_retirement_dwork, 3355 msecs_to_jiffies(delayed_ms)); 3356 3357 return ret ? true : false; 3358 } 3359 3360 static void amdgpu_ras_do_page_retirement(struct work_struct *work) 3361 { 3362 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, 3363 page_retirement_dwork.work); 3364 struct amdgpu_device *adev = con->adev; 3365 struct ras_err_data err_data; 3366 3367 /* If gpu reset is ongoing, delay retiring the bad pages */ 3368 if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) { 3369 amdgpu_ras_schedule_retirement_dwork(con, 3370 AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3); 3371 return; 3372 } 3373 3374 amdgpu_ras_error_data_init(&err_data); 3375 3376 amdgpu_umc_handle_bad_pages(adev, &err_data); 3377 3378 amdgpu_ras_error_data_fini(&err_data); 3379 3380 amdgpu_ras_schedule_retirement_dwork(con, 3381 AMDGPU_RAS_RETIRE_PAGE_INTERVAL); 3382 } 3383 3384 static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev, 3385 uint32_t poison_creation_count) 3386 { 3387 int ret = 0; 3388 struct ras_ecc_log_info *ecc_log; 3389 struct ras_query_if info; 3390 uint32_t timeout = 0; 3391 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 3392 uint64_t de_queried_count; 3393 uint32_t new_detect_count, total_detect_count; 3394 uint32_t need_query_count = poison_creation_count; 3395 enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION; 3396 3397 memset(&info, 0, sizeof(info)); 3398 info.head.block = AMDGPU_RAS_BLOCK__UMC; 3399 3400 ecc_log = &ras->umc_ecc_log; 3401 total_detect_count = 0; 3402 do { 3403 ret = amdgpu_ras_query_error_status_with_event(adev, &info, type); 3404 if (ret) 3405 return ret; 3406 3407 de_queried_count = ecc_log->de_queried_count; 3408 if (de_queried_count > ecc_log->prev_de_queried_count) { 3409 new_detect_count = de_queried_count - ecc_log->prev_de_queried_count; 3410 ecc_log->prev_de_queried_count = de_queried_count; 3411 timeout = 0; 3412 } else { 3413 new_detect_count = 0; 3414 } 3415 3416 if (new_detect_count) { 3417 total_detect_count += new_detect_count; 3418 } else { 3419 if (!timeout && need_query_count) 3420 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC; 3421 3422 if (timeout) { 3423 if (!--timeout) 3424 break; 3425 msleep(1); 3426 } 3427 } 3428 } while (total_detect_count < need_query_count); 3429 3430 if (total_detect_count) 3431 schedule_delayed_work(&ras->page_retirement_dwork, 0); 3432 3433 if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0) 3434 amdgpu_ras_reset_gpu(adev); 3435 3436 return 0; 3437 } 3438 3439 static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev) 3440 { 3441 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3442 struct ras_poison_msg msg; 3443 int ret; 3444 3445 do { 3446 ret = kfifo_get(&con->poison_fifo, &msg); 3447 } while (ret); 3448 } 3449 3450 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev, 3451 uint32_t msg_count, uint32_t *gpu_reset) 3452 { 3453 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3454 uint32_t reset_flags = 0, reset = 0; 3455 struct ras_poison_msg msg; 3456 int ret, i; 3457 3458 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); 3459 3460 for (i = 0; i < msg_count; i++) { 3461 ret = amdgpu_ras_get_poison_req(adev, &msg); 3462 if (!ret) 3463 continue; 3464 3465 if (msg.pasid_fn) 3466 msg.pasid_fn(adev, msg.pasid, msg.data); 3467 3468 reset_flags |= msg.reset; 3469 } 3470 3471 /* 3472 * Try to ensure poison creation handler is completed first 3473 * to set rma if bad page exceed threshold. 
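* Flushing page_retirement_dwork below lets any pending bad-page handling
* finish, so the RMA state checked afterwards is up to date.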
3474 */ 3475 flush_delayed_work(&con->page_retirement_dwork); 3476 3477 /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */ 3478 if (reset_flags && !amdgpu_ras_is_rma(adev)) { 3479 if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) 3480 reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; 3481 else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) 3482 reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; 3483 else 3484 reset = reset_flags; 3485 3486 con->gpu_reset_flags |= reset; 3487 amdgpu_ras_reset_gpu(adev); 3488 3489 *gpu_reset = reset; 3490 3491 /* Wait for gpu recovery to complete */ 3492 flush_work(&con->recovery_work); 3493 } 3494 3495 return 0; 3496 } 3497 3498 static int amdgpu_ras_page_retirement_thread(void *param) 3499 { 3500 struct amdgpu_device *adev = (struct amdgpu_device *)param; 3501 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3502 uint32_t poison_creation_count, msg_count; 3503 uint32_t gpu_reset; 3504 int ret; 3505 3506 while (!kthread_should_stop()) { 3507 3508 wait_event_interruptible(con->page_retirement_wq, 3509 kthread_should_stop() || 3510 atomic_read(&con->page_retirement_req_cnt)); 3511 3512 if (kthread_should_stop()) 3513 break; 3514 3515 gpu_reset = 0; 3516 3517 do { 3518 poison_creation_count = atomic_read(&con->poison_creation_count); 3519 ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count); 3520 if (ret == -EIO) 3521 break; 3522 3523 if (poison_creation_count) { 3524 atomic_sub(poison_creation_count, &con->poison_creation_count); 3525 atomic_sub(poison_creation_count, &con->page_retirement_req_cnt); 3526 } 3527 } while (atomic_read(&con->poison_creation_count)); 3528 3529 if (ret != -EIO) { 3530 msg_count = kfifo_len(&con->poison_fifo); 3531 if (msg_count) { 3532 ret = amdgpu_ras_poison_consumption_handler(adev, 3533 msg_count, &gpu_reset); 3534 if ((ret != -EIO) && 3535 (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET)) 3536 atomic_sub(msg_count, &con->page_retirement_req_cnt); 3537 } 3538 } 3539 3540 if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) { 3541 /* gpu mode-1 reset is ongoing or just completed ras mode-1 reset */ 3542 /* Clear poison creation request */ 3543 atomic_set(&con->poison_creation_count, 0); 3544 3545 /* Clear poison fifo */ 3546 amdgpu_ras_clear_poison_fifo(adev); 3547 3548 /* Clear all poison requests */ 3549 atomic_set(&con->page_retirement_req_cnt, 0); 3550 3551 if (ret == -EIO) { 3552 /* Wait for mode-1 reset to complete */ 3553 down_read(&adev->reset_domain->sem); 3554 up_read(&adev->reset_domain->sem); 3555 } 3556 3557 /* Wake up work to save bad pages to eeprom */ 3558 schedule_delayed_work(&con->page_retirement_dwork, 0); 3559 } else if (gpu_reset) { 3560 /* gpu just completed mode-2 reset or other reset */ 3561 /* Clear poison consumption messages cached in fifo */ 3562 msg_count = kfifo_len(&con->poison_fifo); 3563 if (msg_count) { 3564 amdgpu_ras_clear_poison_fifo(adev); 3565 atomic_sub(msg_count, &con->page_retirement_req_cnt); 3566 } 3567 3568 /* Wake up work to save bad pages to eeprom */ 3569 schedule_delayed_work(&con->page_retirement_dwork, 0); 3570 } 3571 } 3572 3573 return 0; 3574 } 3575 3576 int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) 3577 { 3578 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3579 struct amdgpu_ras_eeprom_control *control; 3580 int ret; 3581 3582 if (!con || amdgpu_sriov_vf(adev)) 3583 return 0; 3584 3585 control = &con->eeprom_control; 3586 ret = amdgpu_ras_eeprom_init(control); 3587 control->is_eeprom_valid = !ret; 3588 
3589 if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr) 3590 control->ras_num_pa_recs = control->ras_num_recs; 3591 3592 if (adev->umc.ras && 3593 adev->umc.ras->get_retire_flip_bits) 3594 adev->umc.ras->get_retire_flip_bits(adev); 3595 3596 if (control->ras_num_recs && control->is_eeprom_valid) { 3597 ret = amdgpu_ras_load_bad_pages(adev); 3598 if (ret) { 3599 control->is_eeprom_valid = false; 3600 return 0; 3601 } 3602 3603 amdgpu_dpm_send_hbm_bad_pages_num( 3604 adev, control->ras_num_bad_pages); 3605 3606 if (con->update_channel_flag == true) { 3607 amdgpu_dpm_send_hbm_bad_channel_flag( 3608 adev, control->bad_channel_bitmap); 3609 con->update_channel_flag = false; 3610 } 3611 3612 /* The format action is only applied to new ASICs */ 3613 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) >= 12 && 3614 control->tbl_hdr.version < RAS_TABLE_VER_V3) 3615 if (!amdgpu_ras_eeprom_reset_table(control)) 3616 if (amdgpu_ras_save_bad_pages(adev, NULL)) 3617 dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n"); 3618 } 3619 3620 return 0; 3621 } 3622 3623 int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info) 3624 { 3625 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3626 struct ras_err_handler_data **data; 3627 u32 max_eeprom_records_count = 0; 3628 int ret; 3629 3630 if (!con || amdgpu_sriov_vf(adev)) 3631 return 0; 3632 3633 /* Allow access to RAS EEPROM via debugfs, when the ASIC 3634 * supports RAS and debugfs is enabled, but when 3635 * adev->ras_enabled is unset, i.e. when "ras_enable" 3636 * module parameter is set to 0. 3637 */ 3638 con->adev = adev; 3639 3640 if (!adev->ras_enabled) 3641 return 0; 3642 3643 data = &con->eh_data; 3644 *data = kzalloc(sizeof(**data), GFP_KERNEL); 3645 if (!*data) { 3646 ret = -ENOMEM; 3647 goto out; 3648 } 3649 3650 mutex_init(&con->recovery_lock); 3651 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); 3652 atomic_set(&con->in_recovery, 0); 3653 atomic_set(&con->rma_in_recovery, 0); 3654 con->eeprom_control.bad_channel_bitmap = 0; 3655 3656 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); 3657 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count); 3658 3659 if (init_bp_info) { 3660 ret = amdgpu_ras_init_badpage_info(adev); 3661 if (ret) 3662 goto free; 3663 } 3664 3665 mutex_init(&con->page_rsv_lock); 3666 INIT_KFIFO(con->poison_fifo); 3667 mutex_init(&con->page_retirement_lock); 3668 init_waitqueue_head(&con->page_retirement_wq); 3669 atomic_set(&con->page_retirement_req_cnt, 0); 3670 atomic_set(&con->poison_creation_count, 0); 3671 con->page_retirement_thread = 3672 kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement"); 3673 if (IS_ERR(con->page_retirement_thread)) { 3674 con->page_retirement_thread = NULL; 3675 dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n"); 3676 } 3677 3678 INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement); 3679 amdgpu_ras_ecc_log_init(&con->umc_ecc_log); 3680 #ifdef CONFIG_X86_MCE_AMD 3681 if ((adev->asic_type == CHIP_ALDEBARAN) && 3682 (adev->gmc.xgmi.connected_to_cpu)) 3683 amdgpu_register_bad_pages_mca_notifier(adev); 3684 #endif 3685 return 0; 3686 3687 free: 3688 kfree((*data)->bps); 3689 kfree(*data); 3690 con->eh_data = NULL; 3691 out: 3692 dev_warn(adev->dev, "Failed to initialize ras recovery! 
(%d)\n", ret); 3693 3694 /* 3695 * Except error threshold exceeding case, other failure cases in this 3696 * function would not fail amdgpu driver init. 3697 */ 3698 if (!amdgpu_ras_is_rma(adev)) 3699 ret = 0; 3700 else 3701 ret = -EINVAL; 3702 3703 return ret; 3704 } 3705 3706 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) 3707 { 3708 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3709 struct ras_err_handler_data *data = con->eh_data; 3710 int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES; 3711 bool ret; 3712 3713 /* recovery_init failed to init it, fini is useless */ 3714 if (!data) 3715 return 0; 3716 3717 /* Save all cached bad pages to eeprom */ 3718 do { 3719 flush_delayed_work(&con->page_retirement_dwork); 3720 ret = amdgpu_ras_schedule_retirement_dwork(con, 0); 3721 } while (ret && max_flush_timeout--); 3722 3723 if (con->page_retirement_thread) 3724 kthread_stop(con->page_retirement_thread); 3725 3726 atomic_set(&con->page_retirement_req_cnt, 0); 3727 atomic_set(&con->poison_creation_count, 0); 3728 3729 mutex_destroy(&con->page_rsv_lock); 3730 3731 cancel_work_sync(&con->recovery_work); 3732 3733 cancel_delayed_work_sync(&con->page_retirement_dwork); 3734 3735 amdgpu_ras_ecc_log_fini(&con->umc_ecc_log); 3736 3737 mutex_lock(&con->recovery_lock); 3738 con->eh_data = NULL; 3739 kfree(data->bps); 3740 kfree(data); 3741 mutex_unlock(&con->recovery_lock); 3742 3743 amdgpu_ras_critical_region_init(adev); 3744 3745 return 0; 3746 } 3747 /* recovery end */ 3748 3749 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) 3750 { 3751 if (amdgpu_sriov_vf(adev)) { 3752 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 3753 case IP_VERSION(13, 0, 2): 3754 case IP_VERSION(13, 0, 6): 3755 case IP_VERSION(13, 0, 12): 3756 case IP_VERSION(13, 0, 14): 3757 return true; 3758 default: 3759 return false; 3760 } 3761 } 3762 3763 if (adev->asic_type == CHIP_IP_DISCOVERY) { 3764 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 3765 case IP_VERSION(13, 0, 0): 3766 case IP_VERSION(13, 0, 6): 3767 case IP_VERSION(13, 0, 10): 3768 case IP_VERSION(13, 0, 12): 3769 case IP_VERSION(13, 0, 14): 3770 case IP_VERSION(14, 0, 3): 3771 return true; 3772 default: 3773 return false; 3774 } 3775 } 3776 3777 return adev->asic_type == CHIP_VEGA10 || 3778 adev->asic_type == CHIP_VEGA20 || 3779 adev->asic_type == CHIP_ARCTURUS || 3780 adev->asic_type == CHIP_ALDEBARAN || 3781 adev->asic_type == CHIP_SIENNA_CICHLID; 3782 } 3783 3784 /* 3785 * this is workaround for vega20 workstation sku, 3786 * force enable gfx ras, ignore vbios gfx ras flag 3787 * due to GC EDC can not write 3788 */ 3789 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev) 3790 { 3791 struct atom_context *ctx = adev->mode_info.atom_context; 3792 3793 if (!ctx) 3794 return; 3795 3796 if (strnstr(ctx->vbios_pn, "D16406", 3797 sizeof(ctx->vbios_pn)) || 3798 strnstr(ctx->vbios_pn, "D36002", 3799 sizeof(ctx->vbios_pn))) 3800 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX); 3801 } 3802 3803 /* Query ras capablity via atomfirmware interface */ 3804 static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev) 3805 { 3806 /* mem_ecc cap */ 3807 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { 3808 dev_info(adev->dev, "MEM ECC is active.\n"); 3809 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC | 3810 1 << AMDGPU_RAS_BLOCK__DF); 3811 } else { 3812 dev_info(adev->dev, "MEM ECC is not presented.\n"); 3813 } 3814 3815 /* sram_ecc cap */ 3816 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { 
3817 dev_info(adev->dev, "SRAM ECC is active.\n"); 3818 if (!amdgpu_sriov_vf(adev)) 3819 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC | 3820 1 << AMDGPU_RAS_BLOCK__DF); 3821 else 3822 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF | 3823 1 << AMDGPU_RAS_BLOCK__SDMA | 3824 1 << AMDGPU_RAS_BLOCK__GFX); 3825 3826 /* 3827 * VCN/JPEG RAS can be supported on both bare metal and 3828 * SRIOV environments 3829 */ 3830 if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) || 3831 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) || 3832 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) || 3833 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1)) 3834 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN | 3835 1 << AMDGPU_RAS_BLOCK__JPEG); 3836 else 3837 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN | 3838 1 << AMDGPU_RAS_BLOCK__JPEG); 3839 3840 /* 3841 * XGMI RAS is not supported if the number of xgmi physical nodes 3842 * is zero 3843 */ 3844 if (!adev->gmc.xgmi.num_physical_nodes) 3845 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL); 3846 } else { 3847 dev_info(adev->dev, "SRAM ECC is not present.\n"); 3848 } 3849 } 3850 3851 /* Query poison mode from umc/df IP callbacks */ 3852 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev) 3853 { 3854 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3855 bool df_poison, umc_poison; 3856 3857 /* poison setting is useless on an SRIOV guest */ 3858 if (amdgpu_sriov_vf(adev) || !con) 3859 return; 3860 3861 /* Init the poison supported flag, the default value is false */ 3862 if (adev->gmc.xgmi.connected_to_cpu || 3863 adev->gmc.is_app_apu) { 3864 /* enabled by default when GPU is connected to CPU */ 3865 con->poison_supported = true; 3866 } else if (adev->df.funcs && 3867 adev->df.funcs->query_ras_poison_mode && 3868 adev->umc.ras && 3869 adev->umc.ras->query_ras_poison_mode) { 3870 df_poison = 3871 adev->df.funcs->query_ras_poison_mode(adev); 3872 umc_poison = 3873 adev->umc.ras->query_ras_poison_mode(adev); 3874 3875 /* Only if poison mode is set in both DF and UMC can we support it */ 3876 if (df_poison && umc_poison) 3877 con->poison_supported = true; 3878 else if (df_poison != umc_poison) 3879 dev_warn(adev->dev, 3880 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n", 3881 df_poison, umc_poison); 3882 } 3883 } 3884 3885 /* 3886 * Check the hardware's ras ability, which will be saved in hw_supported. 3887 * If the hardware does not support ras, we can skip some ras initialization and 3888 * forbid some ras operations from IPs. 3889 * If software itself (say, a boot parameter) limits the ras ability, we still 3890 * need to allow IPs to do some limited operations, like disable. In such a case, 3891 * we have to initialize ras as normal, but need to check whether the operation is 3892 * allowed or not in each function.
3893 */ 3894 static void amdgpu_ras_check_supported(struct amdgpu_device *adev) 3895 { 3896 adev->ras_hw_enabled = adev->ras_enabled = 0; 3897 3898 if (!amdgpu_ras_asic_supported(adev)) 3899 return; 3900 3901 if (amdgpu_sriov_vf(adev)) { 3902 if (amdgpu_virt_get_ras_capability(adev)) 3903 goto init_ras_enabled_flag; 3904 } 3905 3906 /* query ras capability from psp */ 3907 if (amdgpu_psp_get_ras_capability(&adev->psp)) 3908 goto init_ras_enabled_flag; 3909 3910 /* query ras capablity from bios */ 3911 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { 3912 amdgpu_ras_query_ras_capablity_from_vbios(adev); 3913 } else { 3914 /* driver only manages a few IP blocks RAS feature 3915 * when GPU is connected cpu through XGMI */ 3916 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX | 3917 1 << AMDGPU_RAS_BLOCK__SDMA | 3918 1 << AMDGPU_RAS_BLOCK__MMHUB); 3919 } 3920 3921 /* apply asic specific settings (vega20 only for now) */ 3922 amdgpu_ras_get_quirks(adev); 3923 3924 /* query poison mode from umc/df ip callback */ 3925 amdgpu_ras_query_poison_mode(adev); 3926 3927 init_ras_enabled_flag: 3928 /* hw_supported needs to be aligned with RAS block mask. */ 3929 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK; 3930 3931 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 : 3932 adev->ras_hw_enabled & amdgpu_ras_mask; 3933 3934 /* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */ 3935 if (!amdgpu_sriov_vf(adev)) { 3936 adev->aca.is_enabled = 3937 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 3938 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || 3939 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)); 3940 } 3941 3942 /* bad page feature is not applicable to specific app platform */ 3943 if (adev->gmc.is_app_apu && 3944 amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0)) 3945 amdgpu_bad_page_threshold = 0; 3946 } 3947 3948 static void amdgpu_ras_counte_dw(struct work_struct *work) 3949 { 3950 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, 3951 ras_counte_delay_work.work); 3952 struct amdgpu_device *adev = con->adev; 3953 struct drm_device *dev = adev_to_drm(adev); 3954 unsigned long ce_count, ue_count; 3955 int res; 3956 3957 res = pm_runtime_get_sync(dev->dev); 3958 if (res < 0) 3959 goto Out; 3960 3961 /* Cache new values. 3962 */ 3963 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) { 3964 atomic_set(&con->ras_ce_count, ce_count); 3965 atomic_set(&con->ras_ue_count, ue_count); 3966 } 3967 3968 pm_runtime_mark_last_busy(dev->dev); 3969 Out: 3970 pm_runtime_put_autosuspend(dev->dev); 3971 } 3972 3973 static int amdgpu_get_ras_schema(struct amdgpu_device *adev) 3974 { 3975 return amdgpu_ras_is_poison_mode_supported(adev) ? 
AMDGPU_RAS_ERROR__POISON : 0 | 3976 AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE | 3977 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE | 3978 AMDGPU_RAS_ERROR__PARITY; 3979 } 3980 3981 static void ras_event_mgr_init(struct ras_event_manager *mgr) 3982 { 3983 struct ras_event_state *event_state; 3984 int i; 3985 3986 memset(mgr, 0, sizeof(*mgr)); 3987 atomic64_set(&mgr->seqno, 0); 3988 3989 for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) { 3990 event_state = &mgr->event_state[i]; 3991 event_state->last_seqno = RAS_EVENT_INVALID_ID; 3992 atomic64_set(&event_state->count, 0); 3993 } 3994 } 3995 3996 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev) 3997 { 3998 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 3999 struct amdgpu_hive_info *hive; 4000 4001 if (!ras) 4002 return; 4003 4004 hive = amdgpu_get_xgmi_hive(adev); 4005 ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr; 4006 4007 /* init event manager with node 0 on xgmi system */ 4008 if (!amdgpu_reset_in_recovery(adev)) { 4009 if (!hive || adev->gmc.xgmi.node_id == 0) 4010 ras_event_mgr_init(ras->event_mgr); 4011 } 4012 4013 if (hive) 4014 amdgpu_put_xgmi_hive(hive); 4015 } 4016 4017 static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev) 4018 { 4019 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4020 4021 if (!con || (adev->flags & AMD_IS_APU)) 4022 return; 4023 4024 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 4025 case IP_VERSION(13, 0, 2): 4026 case IP_VERSION(13, 0, 6): 4027 case IP_VERSION(13, 0, 12): 4028 con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT; 4029 break; 4030 case IP_VERSION(13, 0, 14): 4031 con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1); 4032 break; 4033 default: 4034 break; 4035 } 4036 } 4037 4038 int amdgpu_ras_init(struct amdgpu_device *adev) 4039 { 4040 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4041 int r; 4042 4043 if (con) 4044 return 0; 4045 4046 con = kzalloc(sizeof(*con) + 4047 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT + 4048 sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT, 4049 GFP_KERNEL); 4050 if (!con) 4051 return -ENOMEM; 4052 4053 con->adev = adev; 4054 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); 4055 atomic_set(&con->ras_ce_count, 0); 4056 atomic_set(&con->ras_ue_count, 0); 4057 4058 con->objs = (struct ras_manager *)(con + 1); 4059 4060 amdgpu_ras_set_context(adev, con); 4061 4062 amdgpu_ras_check_supported(adev); 4063 4064 if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) { 4065 /* set gfx block ras context feature for VEGA20 Gaming 4066 * send ras disable cmd to ras ta during ras late init. 4067 */ 4068 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) { 4069 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); 4070 4071 return 0; 4072 } 4073 4074 r = 0; 4075 goto release_con; 4076 } 4077 4078 con->update_channel_flag = false; 4079 con->features = 0; 4080 con->schema = 0; 4081 INIT_LIST_HEAD(&con->head); 4082 /* Might need get this flag from vbios. 
*/ 4083 con->flags = RAS_DEFAULT_FLAGS; 4084 4085 /* initialize the nbio ras function ahead of any other 4086 * ras functions so the hardware fatal error interrupt 4087 * can be enabled as early as possible */ 4088 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 4089 case IP_VERSION(7, 4, 0): 4090 case IP_VERSION(7, 4, 1): 4091 case IP_VERSION(7, 4, 4): 4092 if (!adev->gmc.xgmi.connected_to_cpu) 4093 adev->nbio.ras = &nbio_v7_4_ras; 4094 break; 4095 case IP_VERSION(4, 3, 0): 4096 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF)) 4097 /* unlike other generations of nbio ras, 4098 * nbio v4_3 only supports the fatal error interrupt 4099 * to inform software that DF is frozen due to a 4100 * system fatal error event. The driver should not 4101 * enable nbio ras in such a case. Instead, 4102 * check DF RAS */ 4103 adev->nbio.ras = &nbio_v4_3_ras; 4104 break; 4105 case IP_VERSION(6, 3, 1): 4106 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF)) 4107 /* unlike other generations of nbio ras, 4108 * nbif v6_3_1 only supports the fatal error interrupt 4109 * to inform software that DF is frozen due to a 4110 * system fatal error event. The driver should not 4111 * enable nbio ras in such a case. Instead, 4112 * check DF RAS 4113 */ 4114 adev->nbio.ras = &nbif_v6_3_1_ras; 4115 break; 4116 case IP_VERSION(7, 9, 0): 4117 case IP_VERSION(7, 9, 1): 4118 if (!adev->gmc.is_app_apu) 4119 adev->nbio.ras = &nbio_v7_9_ras; 4120 break; 4121 default: 4122 /* nbio ras is not available */ 4123 break; 4124 } 4125 4126 /* the nbio ras block needs to be enabled ahead of other ras blocks 4127 * to handle fatal errors */ 4128 r = amdgpu_nbio_ras_sw_init(adev); 4129 if (r) 4130 return r; 4131 4132 if (adev->nbio.ras && 4133 adev->nbio.ras->init_ras_controller_interrupt) { 4134 r = adev->nbio.ras->init_ras_controller_interrupt(adev); 4135 if (r) 4136 goto release_con; 4137 } 4138 4139 if (adev->nbio.ras && 4140 adev->nbio.ras->init_ras_err_event_athub_interrupt) { 4141 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev); 4142 if (r) 4143 goto release_con; 4144 } 4145 4146 /* Pack the socket_id into ras feature mask bits [31:29] */ 4147 if (adev->smuio.funcs && 4148 adev->smuio.funcs->get_socket_id) 4149 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 4150 AMDGPU_RAS_FEATURES_SOCKETID_SHIFT); 4151 4152 /* Get the RAS schema for this particular SOC */ 4153 con->schema = amdgpu_get_ras_schema(adev); 4154 4155 amdgpu_ras_init_reserved_vram_size(adev); 4156 4157 if (amdgpu_ras_fs_init(adev)) { 4158 r = -EINVAL; 4159 goto release_con; 4160 } 4161 4162 if (amdgpu_ras_aca_is_supported(adev)) { 4163 if (amdgpu_aca_is_enabled(adev)) 4164 r = amdgpu_aca_init(adev); 4165 else 4166 r = amdgpu_mca_init(adev); 4167 if (r) 4168 goto release_con; 4169 } 4170 4171 con->init_task_pid = task_pid_nr(current); 4172 get_task_comm(con->init_task_comm, current); 4173 4174 mutex_init(&con->critical_region_lock); 4175 INIT_LIST_HEAD(&con->critical_region_head); 4176 4177 dev_info(adev->dev, "RAS INFO: ras initialized successfully, " 4178 "hardware ability[%x] ras_mask[%x]\n", 4179 adev->ras_hw_enabled, adev->ras_enabled); 4180 4181 return 0; 4182 release_con: 4183 amdgpu_ras_set_context(adev, NULL); 4184 kfree(con); 4185 4186 return r; 4187 } 4188 4189 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev) 4190 { 4191 if (adev->gmc.xgmi.connected_to_cpu || 4192 adev->gmc.is_app_apu) 4193 return 1; 4194 return 0; 4195 } 4196 4197 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev, 4198 struct ras_common_if *ras_block) 4199 {
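/*
 * Descriptive note: on ASICs with persistent EDC harvesting (CPU-connected
 * XGMI or APP APU parts), error counts survive a warm reset. Query and then
 * reset the block's error status here so any stale counts are consumed and
 * cleared before normal RAS operation resumes.
 */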
4200 struct ras_query_if info = { 4201 .head = *ras_block, 4202 }; 4203 4204 if (!amdgpu_persistent_edc_harvesting_supported(adev)) 4205 return 0; 4206 4207 if (amdgpu_ras_query_error_status(adev, &info) != 0) 4208 DRM_WARN("RAS init harvest failure"); 4209 4210 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0) 4211 DRM_WARN("RAS init harvest reset failure"); 4212 4213 return 0; 4214 } 4215 4216 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev) 4217 { 4218 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4219 4220 if (!con) 4221 return false; 4222 4223 return con->poison_supported; 4224 } 4225 4226 /* helper function to handle common stuff in the ip late init phase */ 4227 int amdgpu_ras_block_late_init(struct amdgpu_device *adev, 4228 struct ras_common_if *ras_block) 4229 { 4230 struct amdgpu_ras_block_object *ras_obj = NULL; 4231 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4232 struct ras_query_if *query_info; 4233 unsigned long ue_count, ce_count; 4234 int r; 4235 4236 /* disable the RAS feature per IP block if it is not supported */ 4237 if (!amdgpu_ras_is_supported(adev, ras_block->block)) { 4238 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); 4239 return 0; 4240 } 4241 4242 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); 4243 if (r) { 4244 if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) { 4245 /* in the resume phase, if we fail to enable ras, 4246 * clean up all ras fs nodes and disable ras */ 4247 goto cleanup; 4248 } else 4249 return r; 4250 } 4251 4252 /* check for errors on warm reset for ASICs that support persistent EDC harvesting */ 4253 amdgpu_persistent_edc_harvesting(adev, ras_block); 4254 4255 /* in the resume phase, there is no need to create the ras fs node */ 4256 if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) 4257 return 0; 4258 4259 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm); 4260 if (ras_obj->ras_cb || (ras_obj->hw_ops && 4261 (ras_obj->hw_ops->query_poison_status || 4262 ras_obj->hw_ops->handle_poison_consumption))) { 4263 r = amdgpu_ras_interrupt_add_handler(adev, ras_block); 4264 if (r) 4265 goto cleanup; 4266 } 4267 4268 if (ras_obj->hw_ops && 4269 (ras_obj->hw_ops->query_ras_error_count || 4270 ras_obj->hw_ops->query_ras_error_status)) { 4271 r = amdgpu_ras_sysfs_create(adev, ras_block); 4272 if (r) 4273 goto interrupt; 4274 4275 /* Those are the cached values at init.
4276 */ 4277 query_info = kzalloc(sizeof(*query_info), GFP_KERNEL); 4278 if (!query_info) 4279 return -ENOMEM; 4280 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if)); 4281 4282 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) { 4283 atomic_set(&con->ras_ce_count, ce_count); 4284 atomic_set(&con->ras_ue_count, ue_count); 4285 } 4286 4287 kfree(query_info); 4288 } 4289 4290 return 0; 4291 4292 interrupt: 4293 if (ras_obj->ras_cb) 4294 amdgpu_ras_interrupt_remove_handler(adev, ras_block); 4295 cleanup: 4296 amdgpu_ras_feature_enable(adev, ras_block, 0); 4297 return r; 4298 } 4299 4300 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev, 4301 struct ras_common_if *ras_block) 4302 { 4303 return amdgpu_ras_block_late_init(adev, ras_block); 4304 } 4305 4306 /* helper function to remove ras fs node and interrupt handler */ 4307 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev, 4308 struct ras_common_if *ras_block) 4309 { 4310 struct amdgpu_ras_block_object *ras_obj; 4311 if (!ras_block) 4312 return; 4313 4314 amdgpu_ras_sysfs_remove(adev, ras_block); 4315 4316 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm); 4317 if (ras_obj->ras_cb) 4318 amdgpu_ras_interrupt_remove_handler(adev, ras_block); 4319 } 4320 4321 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev, 4322 struct ras_common_if *ras_block) 4323 { 4324 return amdgpu_ras_block_late_fini(adev, ras_block); 4325 } 4326 4327 /* do some init work after IP late init as dependence. 4328 * and it runs in resume/gpu reset/booting up cases. 4329 */ 4330 void amdgpu_ras_resume(struct amdgpu_device *adev) 4331 { 4332 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4333 struct ras_manager *obj, *tmp; 4334 4335 if (!adev->ras_enabled || !con) { 4336 /* clean ras context for VEGA20 Gaming after send ras disable cmd */ 4337 amdgpu_release_ras_context(adev); 4338 4339 return; 4340 } 4341 4342 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { 4343 /* Set up all other IPs which are not implemented. There is a 4344 * tricky thing that IP's actual ras error type should be 4345 * MULTI_UNCORRECTABLE, but as driver does not handle it, so 4346 * ERROR_NONE make sense anyway. 4347 */ 4348 amdgpu_ras_enable_all_features(adev, 1); 4349 4350 /* We enable ras on all hw_supported block, but as boot 4351 * parameter might disable some of them and one or more IP has 4352 * not implemented yet. So we disable them on behalf. 4353 */ 4354 list_for_each_entry_safe(obj, tmp, &con->head, node) { 4355 if (!amdgpu_ras_is_supported(adev, obj->head.block)) { 4356 amdgpu_ras_feature_enable(adev, &obj->head, 0); 4357 /* there should be no any reference. */ 4358 WARN_ON(alive_obj(obj)); 4359 } 4360 } 4361 } 4362 } 4363 4364 void amdgpu_ras_suspend(struct amdgpu_device *adev) 4365 { 4366 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4367 4368 if (!adev->ras_enabled || !con) 4369 return; 4370 4371 amdgpu_ras_disable_all_features(adev, 0); 4372 /* Make sure all ras objects are disabled. 
*/ 4373 if (AMDGPU_RAS_GET_FEATURES(con->features)) 4374 amdgpu_ras_disable_all_features(adev, 1); 4375 } 4376 4377 int amdgpu_ras_late_init(struct amdgpu_device *adev) 4378 { 4379 struct amdgpu_ras_block_list *node, *tmp; 4380 struct amdgpu_ras_block_object *obj; 4381 int r; 4382 4383 amdgpu_ras_event_mgr_init(adev); 4384 4385 if (amdgpu_ras_aca_is_supported(adev)) { 4386 if (amdgpu_reset_in_recovery(adev)) { 4387 if (amdgpu_aca_is_enabled(adev)) 4388 r = amdgpu_aca_reset(adev); 4389 else 4390 r = amdgpu_mca_reset(adev); 4391 if (r) 4392 return r; 4393 } 4394 4395 if (!amdgpu_sriov_vf(adev)) { 4396 if (amdgpu_aca_is_enabled(adev)) 4397 amdgpu_ras_set_aca_debug_mode(adev, false); 4398 else 4399 amdgpu_ras_set_mca_debug_mode(adev, false); 4400 } 4401 } 4402 4403 /* Guest side doesn't need init ras feature */ 4404 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev)) 4405 return 0; 4406 4407 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { 4408 obj = node->ras_obj; 4409 if (!obj) { 4410 dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); 4411 continue; 4412 } 4413 4414 if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block)) 4415 continue; 4416 4417 if (obj->ras_late_init) { 4418 r = obj->ras_late_init(adev, &obj->ras_comm); 4419 if (r) { 4420 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n", 4421 obj->ras_comm.name, r); 4422 return r; 4423 } 4424 } else 4425 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm); 4426 } 4427 4428 return 0; 4429 } 4430 4431 /* do some fini work before IP fini as dependence */ 4432 int amdgpu_ras_pre_fini(struct amdgpu_device *adev) 4433 { 4434 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4435 4436 if (!adev->ras_enabled || !con) 4437 return 0; 4438 4439 4440 /* Need disable ras on all IPs here before ip [hw/sw]fini */ 4441 if (AMDGPU_RAS_GET_FEATURES(con->features)) 4442 amdgpu_ras_disable_all_features(adev, 0); 4443 amdgpu_ras_recovery_fini(adev); 4444 return 0; 4445 } 4446 4447 int amdgpu_ras_fini(struct amdgpu_device *adev) 4448 { 4449 struct amdgpu_ras_block_list *ras_node, *tmp; 4450 struct amdgpu_ras_block_object *obj = NULL; 4451 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4452 4453 if (!adev->ras_enabled || !con) 4454 return 0; 4455 4456 amdgpu_ras_critical_region_fini(adev); 4457 mutex_destroy(&con->critical_region_lock); 4458 4459 list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) { 4460 if (ras_node->ras_obj) { 4461 obj = ras_node->ras_obj; 4462 if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) && 4463 obj->ras_fini) 4464 obj->ras_fini(adev, &obj->ras_comm); 4465 else 4466 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm); 4467 } 4468 4469 /* Clear ras blocks from ras_list and free ras block list node */ 4470 list_del(&ras_node->node); 4471 kfree(ras_node); 4472 } 4473 4474 amdgpu_ras_fs_fini(adev); 4475 amdgpu_ras_interrupt_remove_all(adev); 4476 4477 if (amdgpu_ras_aca_is_supported(adev)) { 4478 if (amdgpu_aca_is_enabled(adev)) 4479 amdgpu_aca_fini(adev); 4480 else 4481 amdgpu_mca_fini(adev); 4482 } 4483 4484 WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared"); 4485 4486 if (AMDGPU_RAS_GET_FEATURES(con->features)) 4487 amdgpu_ras_disable_all_features(adev, 0); 4488 4489 cancel_delayed_work_sync(&con->ras_counte_delay_work); 4490 4491 amdgpu_ras_set_context(adev, NULL); 4492 kfree(con); 4493 4494 return 0; 4495 } 4496 4497 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev) 4498 { 4499 struct amdgpu_ras *ras; 4500 4501 
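/*
 * Descriptive note: bit AMDGPU_RAS_BLOCK__LAST in ras_err_state is used as
 * the device-wide "fatal error detected" (FED) flag, set and cleared by
 * amdgpu_ras_set_fed() below; the per-block bits track poison/error state
 * for individual IP blocks.
 */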
ras = amdgpu_ras_get_context(adev); 4502 if (!ras) 4503 return false; 4504 4505 return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state); 4506 } 4507 4508 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status) 4509 { 4510 struct amdgpu_ras *ras; 4511 4512 ras = amdgpu_ras_get_context(adev); 4513 if (ras) { 4514 if (status) 4515 set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state); 4516 else 4517 clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state); 4518 } 4519 } 4520 4521 void amdgpu_ras_clear_err_state(struct amdgpu_device *adev) 4522 { 4523 struct amdgpu_ras *ras; 4524 4525 ras = amdgpu_ras_get_context(adev); 4526 if (ras) { 4527 ras->ras_err_state = 0; 4528 ras->gpu_reset_flags = 0; 4529 } 4530 } 4531 4532 void amdgpu_ras_set_err_poison(struct amdgpu_device *adev, 4533 enum amdgpu_ras_block block) 4534 { 4535 struct amdgpu_ras *ras; 4536 4537 ras = amdgpu_ras_get_context(adev); 4538 if (ras) 4539 set_bit(block, &ras->ras_err_state); 4540 } 4541 4542 bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block) 4543 { 4544 struct amdgpu_ras *ras; 4545 4546 ras = amdgpu_ras_get_context(adev); 4547 if (ras) { 4548 if (block == AMDGPU_RAS_BLOCK__ANY) 4549 return (ras->ras_err_state != 0); 4550 else 4551 return test_bit(block, &ras->ras_err_state) || 4552 test_bit(AMDGPU_RAS_BLOCK__LAST, 4553 &ras->ras_err_state); 4554 } 4555 4556 return false; 4557 } 4558 4559 static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev) 4560 { 4561 struct amdgpu_ras *ras; 4562 4563 ras = amdgpu_ras_get_context(adev); 4564 if (!ras) 4565 return NULL; 4566 4567 return ras->event_mgr; 4568 } 4569 4570 int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type, 4571 const void *caller) 4572 { 4573 struct ras_event_manager *event_mgr; 4574 struct ras_event_state *event_state; 4575 int ret = 0; 4576 4577 if (type >= RAS_EVENT_TYPE_COUNT) { 4578 ret = -EINVAL; 4579 goto out; 4580 } 4581 4582 event_mgr = __get_ras_event_mgr(adev); 4583 if (!event_mgr) { 4584 ret = -EINVAL; 4585 goto out; 4586 } 4587 4588 event_state = &event_mgr->event_state[type]; 4589 event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno); 4590 atomic64_inc(&event_state->count); 4591 4592 out: 4593 if (ret && caller) 4594 dev_warn(adev->dev, "failed mark ras event (%d) in %ps, ret:%d\n", 4595 (int)type, caller, ret); 4596 4597 return ret; 4598 } 4599 4600 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type) 4601 { 4602 struct ras_event_manager *event_mgr; 4603 u64 id; 4604 4605 if (type >= RAS_EVENT_TYPE_COUNT) 4606 return RAS_EVENT_INVALID_ID; 4607 4608 switch (type) { 4609 case RAS_EVENT_TYPE_FATAL: 4610 case RAS_EVENT_TYPE_POISON_CREATION: 4611 case RAS_EVENT_TYPE_POISON_CONSUMPTION: 4612 event_mgr = __get_ras_event_mgr(adev); 4613 if (!event_mgr) 4614 return RAS_EVENT_INVALID_ID; 4615 4616 id = event_mgr->event_state[type].last_seqno; 4617 break; 4618 case RAS_EVENT_TYPE_INVALID: 4619 default: 4620 id = RAS_EVENT_INVALID_ID; 4621 break; 4622 } 4623 4624 return id; 4625 } 4626 4627 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) 4628 { 4629 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { 4630 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 4631 enum ras_event_type type = RAS_EVENT_TYPE_FATAL; 4632 u64 event_id; 4633 4634 if (amdgpu_ras_mark_ras_event(adev, type)) { 4635 dev_err(adev->dev, 4636 "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n"); 4637 return; 4638 } 4639 4640 event_id = 
amdgpu_ras_acquire_event_id(adev, type); 4641 4642 RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error" 4643 "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); 4644 4645 amdgpu_ras_set_fed(adev, true); 4646 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; 4647 amdgpu_ras_reset_gpu(adev); 4648 } 4649 } 4650 4651 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev) 4652 { 4653 if (adev->asic_type == CHIP_VEGA20 && 4654 adev->pm.fw_version <= 0x283400) { 4655 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) && 4656 amdgpu_ras_intr_triggered(); 4657 } 4658 4659 return false; 4660 } 4661 4662 void amdgpu_release_ras_context(struct amdgpu_device *adev) 4663 { 4664 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4665 4666 if (!con) 4667 return; 4668 4669 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { 4670 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); 4671 amdgpu_ras_set_context(adev, NULL); 4672 kfree(con); 4673 } 4674 } 4675 4676 #ifdef CONFIG_X86_MCE_AMD 4677 static struct amdgpu_device *find_adev(uint32_t node_id) 4678 { 4679 int i; 4680 struct amdgpu_device *adev = NULL; 4681 4682 for (i = 0; i < mce_adev_list.num_gpu; i++) { 4683 adev = mce_adev_list.devs[i]; 4684 4685 if (adev && adev->gmc.xgmi.connected_to_cpu && 4686 adev->gmc.xgmi.physical_node_id == node_id) 4687 break; 4688 adev = NULL; 4689 } 4690 4691 return adev; 4692 } 4693 4694 #define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF) 4695 #define GET_UMC_INST(m) (((m) >> 21) & 0x7) 4696 #define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4)) 4697 #define GPU_ID_OFFSET 8 4698 4699 static int amdgpu_bad_page_notifier(struct notifier_block *nb, 4700 unsigned long val, void *data) 4701 { 4702 struct mce *m = (struct mce *)data; 4703 struct amdgpu_device *adev = NULL; 4704 uint32_t gpu_id = 0; 4705 uint32_t umc_inst = 0, ch_inst = 0; 4706 4707 /* 4708 * If the error was generated in UMC_V2, which belongs to GPU UMCs, 4709 * and error occurred in DramECC (Extended error code = 0) then only 4710 * process the error, else bail out. 4711 */ 4712 if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) && 4713 (XEC(m->status, 0x3f) == 0x0))) 4714 return NOTIFY_DONE; 4715 4716 /* 4717 * If it is correctable error, return. 4718 */ 4719 if (mce_is_correctable(m)) 4720 return NOTIFY_OK; 4721 4722 /* 4723 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register. 4724 */ 4725 gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET; 4726 4727 adev = find_adev(gpu_id); 4728 if (!adev) { 4729 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__, 4730 gpu_id); 4731 return NOTIFY_DONE; 4732 } 4733 4734 /* 4735 * If it is uncorrectable error, then find out UMC instance and 4736 * channel index. 4737 */ 4738 umc_inst = GET_UMC_INST(m->ipid); 4739 ch_inst = GET_CHAN_INDEX(m->ipid); 4740 4741 dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d", 4742 umc_inst, ch_inst); 4743 4744 if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst)) 4745 return NOTIFY_OK; 4746 else 4747 return NOTIFY_DONE; 4748 } 4749 4750 static struct notifier_block amdgpu_bad_page_nb = { 4751 .notifier_call = amdgpu_bad_page_notifier, 4752 .priority = MCE_PRIO_UC, 4753 }; 4754 4755 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev) 4756 { 4757 /* 4758 * Add the adev to the mce_adev_list. 
4759 * During mode2 reset, amdgpu device is temporarily 4760 * removed from the mgpu_info list which can cause 4761 * page retirement to fail. 4762 * Use this list instead of mgpu_info to find the amdgpu 4763 * device on which the UMC error was reported. 4764 */ 4765 mce_adev_list.devs[mce_adev_list.num_gpu++] = adev; 4766 4767 /* 4768 * Register the x86 notifier only once 4769 * with MCE subsystem. 4770 */ 4771 if (notifier_registered == false) { 4772 mce_register_decode_chain(&amdgpu_bad_page_nb); 4773 notifier_registered = true; 4774 } 4775 } 4776 #endif 4777 4778 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev) 4779 { 4780 if (!adev) 4781 return NULL; 4782 4783 return adev->psp.ras_context.ras; 4784 } 4785 4786 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con) 4787 { 4788 if (!adev) 4789 return -EINVAL; 4790 4791 adev->psp.ras_context.ras = ras_con; 4792 return 0; 4793 } 4794 4795 /* check if ras is supported on block, say, sdma, gfx */ 4796 int amdgpu_ras_is_supported(struct amdgpu_device *adev, 4797 unsigned int block) 4798 { 4799 int ret = 0; 4800 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 4801 4802 if (block >= AMDGPU_RAS_BLOCK_COUNT) 4803 return 0; 4804 4805 ret = ras && (adev->ras_enabled & (1 << block)); 4806 4807 /* For the special asic with mem ecc enabled but sram ecc 4808 * not enabled, even if the ras block is not supported on 4809 * .ras_enabled, if the asic supports poison mode and the 4810 * ras block has ras configuration, it can be considered 4811 * that the ras block supports ras function. 4812 */ 4813 if (!ret && 4814 (block == AMDGPU_RAS_BLOCK__GFX || 4815 block == AMDGPU_RAS_BLOCK__SDMA || 4816 block == AMDGPU_RAS_BLOCK__VCN || 4817 block == AMDGPU_RAS_BLOCK__JPEG) && 4818 (amdgpu_ras_mask & (1 << block)) && 4819 amdgpu_ras_is_poison_mode_supported(adev) && 4820 amdgpu_ras_get_ras_block(adev, block, 0)) 4821 ret = 1; 4822 4823 return ret; 4824 } 4825 4826 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) 4827 { 4828 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 4829 4830 /* mode1 is the only selection for RMA status */ 4831 if (amdgpu_ras_is_rma(adev)) { 4832 ras->gpu_reset_flags = 0; 4833 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; 4834 } 4835 4836 if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) { 4837 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 4838 int hive_ras_recovery = 0; 4839 4840 if (hive) { 4841 hive_ras_recovery = atomic_read(&hive->ras_recovery); 4842 amdgpu_put_xgmi_hive(hive); 4843 } 4844 /* In the case of multiple GPUs, after a GPU has started 4845 * resetting all GPUs on hive, other GPUs do not need to 4846 * trigger GPU reset again. 
4847 */ 4848 if (!hive_ras_recovery) 4849 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work); 4850 else 4851 atomic_set(&ras->in_recovery, 0); 4852 } else { 4853 flush_work(&ras->recovery_work); 4854 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work); 4855 } 4856 4857 return 0; 4858 } 4859 4860 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable) 4861 { 4862 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4863 int ret = 0; 4864 4865 if (con) { 4866 ret = amdgpu_mca_smu_set_debug_mode(adev, enable); 4867 if (!ret) 4868 con->is_aca_debug_mode = enable; 4869 } 4870 4871 return ret; 4872 } 4873 4874 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable) 4875 { 4876 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4877 int ret = 0; 4878 4879 if (con) { 4880 if (amdgpu_aca_is_enabled(adev)) 4881 ret = amdgpu_aca_smu_set_debug_mode(adev, enable); 4882 else 4883 ret = amdgpu_mca_smu_set_debug_mode(adev, enable); 4884 if (!ret) 4885 con->is_aca_debug_mode = enable; 4886 } 4887 4888 return ret; 4889 } 4890 4891 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev) 4892 { 4893 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4894 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs; 4895 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; 4896 4897 if (!con) 4898 return false; 4899 4900 if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) || 4901 (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode)) 4902 return con->is_aca_debug_mode; 4903 else 4904 return true; 4905 } 4906 4907 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, 4908 unsigned int *error_query_mode) 4909 { 4910 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4911 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; 4912 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs; 4913 4914 if (!con) { 4915 *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY; 4916 return false; 4917 } 4918 4919 if (amdgpu_sriov_vf(adev)) { 4920 *error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY; 4921 } else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) { 4922 *error_query_mode = 4923 (con->is_aca_debug_mode) ? 
AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; 4924 } else { 4925 *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY; 4926 } 4927 4928 return true; 4929 } 4930 4931 /* Register each ip ras block into amdgpu ras */ 4932 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, 4933 struct amdgpu_ras_block_object *ras_block_obj) 4934 { 4935 struct amdgpu_ras_block_list *ras_node; 4936 if (!adev || !ras_block_obj) 4937 return -EINVAL; 4938 4939 ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL); 4940 if (!ras_node) 4941 return -ENOMEM; 4942 4943 INIT_LIST_HEAD(&ras_node->node); 4944 ras_node->ras_obj = ras_block_obj; 4945 list_add_tail(&ras_node->node, &adev->ras_list); 4946 4947 return 0; 4948 } 4949 4950 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name) 4951 { 4952 if (!err_type_name) 4953 return; 4954 4955 switch (err_type) { 4956 case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE: 4957 sprintf(err_type_name, "correctable"); 4958 break; 4959 case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE: 4960 sprintf(err_type_name, "uncorrectable"); 4961 break; 4962 default: 4963 sprintf(err_type_name, "unknown"); 4964 break; 4965 } 4966 } 4967 4968 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev, 4969 const struct amdgpu_ras_err_status_reg_entry *reg_entry, 4970 uint32_t instance, 4971 uint32_t *memory_id) 4972 { 4973 uint32_t err_status_lo_data, err_status_lo_offset; 4974 4975 if (!reg_entry) 4976 return false; 4977 4978 err_status_lo_offset = 4979 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance, 4980 reg_entry->seg_lo, reg_entry->reg_lo); 4981 err_status_lo_data = RREG32(err_status_lo_offset); 4982 4983 if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) && 4984 !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG)) 4985 return false; 4986 4987 *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID); 4988 4989 return true; 4990 } 4991 4992 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev, 4993 const struct amdgpu_ras_err_status_reg_entry *reg_entry, 4994 uint32_t instance, 4995 unsigned long *err_cnt) 4996 { 4997 uint32_t err_status_hi_data, err_status_hi_offset; 4998 4999 if (!reg_entry) 5000 return false; 5001 5002 err_status_hi_offset = 5003 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance, 5004 reg_entry->seg_hi, reg_entry->reg_hi); 5005 err_status_hi_data = RREG32(err_status_hi_offset); 5006 5007 if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) && 5008 !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG)) 5009 /* keep the check here in case we need to refer to the result later */ 5010 dev_dbg(adev->dev, "Invalid err_info field\n"); 5011 5012 /* read err count */ 5013 *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT); 5014 5015 return true; 5016 } 5017 5018 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev, 5019 const struct amdgpu_ras_err_status_reg_entry *reg_list, 5020 uint32_t reg_list_size, 5021 const struct amdgpu_ras_memory_id_entry *mem_list, 5022 uint32_t mem_list_size, 5023 uint32_t instance, 5024 uint32_t err_type, 5025 unsigned long *err_count) 5026 { 5027 uint32_t memory_id; 5028 unsigned long err_cnt; 5029 char err_type_name[16]; 5030 uint32_t i, j; 5031 5032 for (i = 0; i < reg_list_size; i++) { 5033 /* query memory_id from err_status_lo */ 5034 if (!amdgpu_ras_inst_get_memory_id_field(adev, ®_list[i], 5035 instance, &memory_id)) 5036 continue; 5037 5038 /* query err_cnt from err_status_hi */ 5039 if 
(!amdgpu_ras_inst_get_err_cnt_field(adev, ®_list[i], 5040 instance, &err_cnt) || 5041 !err_cnt) 5042 continue; 5043 5044 *err_count += err_cnt; 5045 5046 /* log the errors */ 5047 amdgpu_ras_get_error_type_name(err_type, err_type_name); 5048 if (!mem_list) { 5049 /* memory_list is not supported */ 5050 dev_info(adev->dev, 5051 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n", 5052 err_cnt, err_type_name, 5053 reg_list[i].block_name, 5054 instance, memory_id); 5055 } else { 5056 for (j = 0; j < mem_list_size; j++) { 5057 if (memory_id == mem_list[j].memory_id) { 5058 dev_info(adev->dev, 5059 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n", 5060 err_cnt, err_type_name, 5061 reg_list[i].block_name, 5062 instance, mem_list[j].name); 5063 break; 5064 } 5065 } 5066 } 5067 } 5068 } 5069 5070 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev, 5071 const struct amdgpu_ras_err_status_reg_entry *reg_list, 5072 uint32_t reg_list_size, 5073 uint32_t instance) 5074 { 5075 uint32_t err_status_lo_offset, err_status_hi_offset; 5076 uint32_t i; 5077 5078 for (i = 0; i < reg_list_size; i++) { 5079 err_status_lo_offset = 5080 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance, 5081 reg_list[i].seg_lo, reg_list[i].reg_lo); 5082 err_status_hi_offset = 5083 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance, 5084 reg_list[i].seg_hi, reg_list[i].reg_hi); 5085 WREG32(err_status_lo_offset, 0); 5086 WREG32(err_status_hi_offset, 0); 5087 } 5088 } 5089 5090 int amdgpu_ras_error_data_init(struct ras_err_data *err_data) 5091 { 5092 memset(err_data, 0, sizeof(*err_data)); 5093 5094 INIT_LIST_HEAD(&err_data->err_node_list); 5095 5096 return 0; 5097 } 5098 5099 static void amdgpu_ras_error_node_release(struct ras_err_node *err_node) 5100 { 5101 if (!err_node) 5102 return; 5103 5104 list_del(&err_node->node); 5105 kvfree(err_node); 5106 } 5107 5108 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data) 5109 { 5110 struct ras_err_node *err_node, *tmp; 5111 5112 list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node) 5113 amdgpu_ras_error_node_release(err_node); 5114 } 5115 5116 static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data, 5117 struct amdgpu_smuio_mcm_config_info *mcm_info) 5118 { 5119 struct ras_err_node *err_node; 5120 struct amdgpu_smuio_mcm_config_info *ref_id; 5121 5122 if (!err_data || !mcm_info) 5123 return NULL; 5124 5125 for_each_ras_error(err_node, err_data) { 5126 ref_id = &err_node->err_info.mcm_info; 5127 5128 if (mcm_info->socket_id == ref_id->socket_id && 5129 mcm_info->die_id == ref_id->die_id) 5130 return err_node; 5131 } 5132 5133 return NULL; 5134 } 5135 5136 static struct ras_err_node *amdgpu_ras_error_node_new(void) 5137 { 5138 struct ras_err_node *err_node; 5139 5140 err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL); 5141 if (!err_node) 5142 return NULL; 5143 5144 INIT_LIST_HEAD(&err_node->node); 5145 5146 return err_node; 5147 } 5148 5149 static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b) 5150 { 5151 struct ras_err_node *nodea = container_of(a, struct ras_err_node, node); 5152 struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node); 5153 struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info; 5154 struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info; 5155 5156 if (unlikely(infoa->socket_id != infob->socket_id)) 5157 return infoa->socket_id - infob->socket_id; 5158 
else 5159 return infoa->die_id - infob->die_id; 5160 5161 return 0; 5162 } 5163 5164 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data, 5165 struct amdgpu_smuio_mcm_config_info *mcm_info) 5166 { 5167 struct ras_err_node *err_node; 5168 5169 err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info); 5170 if (err_node) 5171 return &err_node->err_info; 5172 5173 err_node = amdgpu_ras_error_node_new(); 5174 if (!err_node) 5175 return NULL; 5176 5177 memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info)); 5178 5179 err_data->err_list_count++; 5180 list_add_tail(&err_node->node, &err_data->err_node_list); 5181 list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp); 5182 5183 return &err_node->err_info; 5184 } 5185 5186 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, 5187 struct amdgpu_smuio_mcm_config_info *mcm_info, 5188 u64 count) 5189 { 5190 struct ras_err_info *err_info; 5191 5192 if (!err_data || !mcm_info) 5193 return -EINVAL; 5194 5195 if (!count) 5196 return 0; 5197 5198 err_info = amdgpu_ras_error_get_info(err_data, mcm_info); 5199 if (!err_info) 5200 return -EINVAL; 5201 5202 err_info->ue_count += count; 5203 err_data->ue_count += count; 5204 5205 return 0; 5206 } 5207 5208 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, 5209 struct amdgpu_smuio_mcm_config_info *mcm_info, 5210 u64 count) 5211 { 5212 struct ras_err_info *err_info; 5213 5214 if (!err_data || !mcm_info) 5215 return -EINVAL; 5216 5217 if (!count) 5218 return 0; 5219 5220 err_info = amdgpu_ras_error_get_info(err_data, mcm_info); 5221 if (!err_info) 5222 return -EINVAL; 5223 5224 err_info->ce_count += count; 5225 err_data->ce_count += count; 5226 5227 return 0; 5228 } 5229 5230 int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data, 5231 struct amdgpu_smuio_mcm_config_info *mcm_info, 5232 u64 count) 5233 { 5234 struct ras_err_info *err_info; 5235 5236 if (!err_data || !mcm_info) 5237 return -EINVAL; 5238 5239 if (!count) 5240 return 0; 5241 5242 err_info = amdgpu_ras_error_get_info(err_data, mcm_info); 5243 if (!err_info) 5244 return -EINVAL; 5245 5246 err_info->de_count += count; 5247 err_data->de_count += count; 5248 5249 return 0; 5250 } 5251 5252 #define mmMP0_SMN_C2PMSG_92 0x1609C 5253 #define mmMP0_SMN_C2PMSG_126 0x160BE 5254 static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev, 5255 u32 instance) 5256 { 5257 u32 socket_id, aid_id, hbm_id; 5258 u32 fw_status; 5259 u32 boot_error; 5260 u64 reg_addr; 5261 5262 /* The pattern for smn addressing in other SOC could be different from 5263 * the one for aqua_vanjaram. We should revisit the code if the pattern 5264 * is changed. In such case, replace the aqua_vanjaram implementation 5265 * with more common helper */ 5266 reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) + 5267 aqua_vanjaram_encode_ext_smn_addressing(instance); 5268 fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr); 5269 5270 reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) + 5271 aqua_vanjaram_encode_ext_smn_addressing(instance); 5272 boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr); 5273 5274 socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error); 5275 aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error); 5276 hbm_id = ((1 == AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error)) ? 
0 : 1); 5277 5278 if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error)) 5279 dev_info(adev->dev, 5280 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n", 5281 socket_id, aid_id, hbm_id, fw_status); 5282 5283 if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error)) 5284 dev_info(adev->dev, 5285 "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n", 5286 socket_id, aid_id, fw_status); 5287 5288 if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error)) 5289 dev_info(adev->dev, 5290 "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n", 5291 socket_id, aid_id, fw_status); 5292 5293 if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error)) 5294 dev_info(adev->dev, 5295 "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n", 5296 socket_id, aid_id, fw_status); 5297 5298 if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error)) 5299 dev_info(adev->dev, 5300 "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n", 5301 socket_id, aid_id, fw_status); 5302 5303 if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error)) 5304 dev_info(adev->dev, 5305 "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n", 5306 socket_id, aid_id, fw_status); 5307 5308 if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error)) 5309 dev_info(adev->dev, 5310 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n", 5311 socket_id, aid_id, hbm_id, fw_status); 5312 5313 if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error)) 5314 dev_info(adev->dev, 5315 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n", 5316 socket_id, aid_id, hbm_id, fw_status); 5317 5318 if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error)) 5319 dev_info(adev->dev, 5320 "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n", 5321 socket_id, aid_id, fw_status); 5322 5323 if (AMDGPU_RAS_GPU_ERR_GENERIC(boot_error)) 5324 dev_info(adev->dev, 5325 "socket: %d, aid: %d, fw_status: 0x%x, Boot Controller Generic Error\n", 5326 socket_id, aid_id, fw_status); 5327 } 5328 5329 static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev, 5330 u32 instance) 5331 { 5332 u64 reg_addr; 5333 u32 reg_data; 5334 int retry_loop; 5335 5336 reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) + 5337 aqua_vanjaram_encode_ext_smn_addressing(instance); 5338 5339 for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) { 5340 reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr); 5341 if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS) 5342 return false; 5343 else 5344 msleep(1); 5345 } 5346 5347 return true; 5348 } 5349 5350 void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances) 5351 { 5352 u32 i; 5353 5354 for (i = 0; i < num_instances; i++) { 5355 if (amdgpu_ras_boot_error_detected(adev, i)) 5356 amdgpu_ras_boot_time_error_reporting(adev, i); 5357 } 5358 } 5359 5360 int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn) 5361 { 5362 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 5363 struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; 5364 uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT; 5365 int ret = 0; 5366 5367 if (amdgpu_ras_check_critical_address(adev, start)) 5368 return 0; 5369 5370 mutex_lock(&con->page_rsv_lock); 5371 ret = amdgpu_vram_mgr_query_page_status(mgr, start); 5372 if (ret == -ENOENT) 5373 ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE); 5374 mutex_unlock(&con->page_rsv_lock); 5375 5376 return ret; 5377 } 5378 5379 void 
amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id, 5380 const char *fmt, ...) 5381 { 5382 struct va_format vaf; 5383 va_list args; 5384 5385 va_start(args, fmt); 5386 vaf.fmt = fmt; 5387 vaf.va = &args; 5388 5389 if (RAS_EVENT_ID_IS_VALID(event_id)) 5390 dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf); 5391 else 5392 dev_printk(KERN_INFO, adev->dev, "%pV", &vaf); 5393 5394 va_end(args); 5395 } 5396 5397 bool amdgpu_ras_is_rma(struct amdgpu_device *adev) 5398 { 5399 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 5400 5401 if (!con) 5402 return false; 5403 5404 return con->is_rma; 5405 } 5406 5407 int amdgpu_ras_add_critical_region(struct amdgpu_device *adev, 5408 struct amdgpu_bo *bo) 5409 { 5410 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 5411 struct amdgpu_vram_mgr_resource *vres; 5412 struct ras_critical_region *region; 5413 struct drm_buddy_block *block; 5414 int ret = 0; 5415 5416 if (!bo || !bo->tbo.resource) 5417 return -EINVAL; 5418 5419 vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource); 5420 5421 mutex_lock(&con->critical_region_lock); 5422 5423 /* Check if the bo had been recorded */ 5424 list_for_each_entry(region, &con->critical_region_head, node) 5425 if (region->bo == bo) 5426 goto out; 5427 5428 /* Record new critical amdgpu bo */ 5429 list_for_each_entry(block, &vres->blocks, link) { 5430 region = kzalloc(sizeof(*region), GFP_KERNEL); 5431 if (!region) { 5432 ret = -ENOMEM; 5433 goto out; 5434 } 5435 region->bo = bo; 5436 region->start = amdgpu_vram_mgr_block_start(block); 5437 region->size = amdgpu_vram_mgr_block_size(block); 5438 list_add_tail(®ion->node, &con->critical_region_head); 5439 } 5440 5441 out: 5442 mutex_unlock(&con->critical_region_lock); 5443 5444 return ret; 5445 } 5446 5447 static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev) 5448 { 5449 amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory); 5450 } 5451 5452 static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev) 5453 { 5454 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 5455 struct ras_critical_region *region, *tmp; 5456 5457 mutex_lock(&con->critical_region_lock); 5458 list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) { 5459 list_del(®ion->node); 5460 kfree(region); 5461 } 5462 mutex_unlock(&con->critical_region_lock); 5463 } 5464 5465 bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr) 5466 { 5467 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 5468 struct ras_critical_region *region; 5469 bool ret = false; 5470 5471 mutex_lock(&con->critical_region_lock); 5472 list_for_each_entry(region, &con->critical_region_head, node) { 5473 if ((region->start <= addr) && 5474 (addr < (region->start + region->size))) { 5475 ret = true; 5476 break; 5477 } 5478 } 5479 mutex_unlock(&con->critical_region_lock); 5480 5481 return ret; 5482 } 5483
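/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * wants its log lines grouped under a RAS event id would do roughly the
 * following, mirroring amdgpu_ras_global_ras_isr() above. The
 * amdgpu_ras_mark_ras_event() helper is assumed to be the wrapper around
 * amdgpu_ras_mark_ras_event_caller() used elsewhere in this file.
 *
 *	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
 *	u64 event_id;
 *
 *	if (!amdgpu_ras_mark_ras_event(adev, type)) {
 *		event_id = amdgpu_ras_acquire_event_id(adev, type);
 *		RAS_EVENT_LOG(adev, event_id, "poison creation observed\n");
 *	}
 */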