/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include <linux/list_sort.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbif_v6_3_1.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
	"ih",
	"mpio",
};

const char *ras_mca_block_string[] = {
	"mca_mp0",
	"mca_mp1",
	"mca_mpio",
	"mca_iohc",
};

struct amdgpu_ras_block_list {
	/* ras block link */
	struct list_head node;

	struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (!ras_block)
		return "NULL";

	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
	    ras_block->block >= ARRAY_SIZE(ras_block_string))
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER		(100 * 1024 * 1024ULL)

#define MAX_UMC_POISON_POLLING_TIME_ASYNC	300  //ms

#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL		100  //ms

#define MAX_FLUSH_RETIRE_DWORK_TIMES		100

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
	int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data;
	struct eeprom_table_record err_rec;
	int ret;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
			 "RAS WARN: input address 0x%llx is invalid.\n",
			 address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
	err_data.err_addr = &err_rec;
	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt, false);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

	amdgpu_ras_error_data_fini(&err_data);

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
		     "ue", info.ue_count,
		     "ce", info.ce_count);
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};

/* Map a RAS block name from ras_block_string[] to its index. */
static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_string[i]) == 0)
			return 0;
	}
	return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;
	/* default value is 0 if the mask is not set by user */
	u32 instance_mask = 0;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue, ce and poison errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else if (!memcmp("poison", err, 6))
			data->head.type = AMDGPU_RAS_ERROR__POISON;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
			data->inject.instance_mask = instance_mask;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}

static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
				struct ras_debug_if *data)
{
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
	uint32_t mask, inst_mask = data->inject.instance_mask;

	/* no need to set instance mask if there is only one instance */
	if (num_xcc <= 1 && inst_mask) {
		data->inject.instance_mask = 0;
		dev_dbg(adev->dev,
			"RAS inject mask(0x%x) isn't supported and force it to 0.\n",
			inst_mask);

		return;
	}

	switch (data->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		mask = GENMASK(num_xcc - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		mask = GENMASK(adev->sdma.num_instances - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__VCN:
	case AMDGPU_RAS_BLOCK__JPEG:
		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
		break;
	default:
		mask = inst_mask;
		break;
	}

	/* remove invalid bits in instance mask */
	data->inject.instance_mask &= mask;
	if (inst_mask != data->inject.instance_mask)
		dev_dbg(adev->dev,
			"Adjust RAS inject mask 0x%x to 0x%x\n",
			inst_mask, data->inject.instance_mask);
}

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members, they are block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head, they are address, value and mask.
 * As their names indicate, inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface (see the C sketch at the
 * end of this section).
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where <N> is the card which you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *	see ras_block_string[] for details
 *
 * The error type is one of: ue, ce and poison where,
 *	ue is multi-uncorrectable
 *	ce is single-correctable
 *	poison is poison
 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 * The mask is the instance mask; it is optional and defaults to 0x1.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
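 *
 * A minimal userspace sketch of the "in a program" path (this assumes
 * struct ras_debug_if and the enum values are copied from the driver
 * headers into the program, as described above; error handling omitted):
 *
 * .. code-block:: c
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	struct ras_debug_if data = { 0 };
 *	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *
 *	data.op = 1;	// 0 == disable, 1 == enable, 2 == inject
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *
 *	// the write must cover at least sizeof(data) bytes
 *	write(fd, &data, sizeof(data));
 *	close(fd);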
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
					     const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev, "RAS WARN: error injection currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size &&
		    adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
				 data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
				 data.inject.address);
			break;
		}

		amdgpu_ras_instance_mask_check(adev, &data);

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in vram. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
					       const char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM.
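		 * The bad page table on the device is now empty, so drop the
		 * cached flags back to their defaults as well.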
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *	ue: 0
 *	ce: 1
 *
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				  "ce", info.ce_count, "de", info.de_count);
	else
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				  "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0)) {
		list_del(&obj->node);
		amdgpu_ras_error_data_fini(&obj->err_data);
	}

	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return NULL;

		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
	} else
		obj = &con->objs[head->block];

	/* already exist. return obj? */
	if (alive_obj(obj))
		return NULL;

	if (amdgpu_ras_error_data_init(&obj->err_data))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
				return NULL;

			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
		} else
			obj = &con->objs[head->block];

		if (alive_obj(obj))
			return obj;
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj))
				return obj;
		}
	}

	return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware does support ras, we can create the obj.
	 * The RAS framework checks con->hw_supported to see if it needs to do
	 * the corresponding initialization.
	 * The IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	/* For non-gfx ip, do not enable ras feature if it is not allowed */
	/* For gfx ip, regardless of feature support status, */
	/* Force issue enable or disable ras feature commands */
	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	/* Only enable gfx ras feature from host side */
	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_sriov_vf(adev) &&
	    !amdgpu_ras_intr_triggered()) {
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}

		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable":"disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			kfree(info);
			return ret;
		}

		kfree(info);
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);

	return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With an old ras TA, we might fail to enable ras.
			 * Log it and just setup the object.
			 * TODO: remove this WA in the future.
872 */ 873 if (ret == -EINVAL) { 874 ret = __amdgpu_ras_feature_enable(adev, head, 1); 875 if (!ret) 876 dev_info(adev->dev, 877 "RAS INFO: %s setup object\n", 878 get_ras_block_str(head)); 879 } 880 } else { 881 /* setup the object then issue a ras TA disable cmd.*/ 882 ret = __amdgpu_ras_feature_enable(adev, head, 1); 883 if (ret) 884 return ret; 885 886 /* gfx block ras disable cmd must send to ras-ta */ 887 if (head->block == AMDGPU_RAS_BLOCK__GFX) 888 con->features |= BIT(head->block); 889 890 ret = amdgpu_ras_feature_enable(adev, head, 0); 891 892 /* clean gfx block ras features flag */ 893 if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX) 894 con->features &= ~BIT(head->block); 895 } 896 } else 897 ret = amdgpu_ras_feature_enable(adev, head, enable); 898 899 return ret; 900 } 901 902 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev, 903 bool bypass) 904 { 905 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 906 struct ras_manager *obj, *tmp; 907 908 list_for_each_entry_safe(obj, tmp, &con->head, node) { 909 /* bypass psp. 910 * aka just release the obj and corresponding flags 911 */ 912 if (bypass) { 913 if (__amdgpu_ras_feature_enable(adev, &obj->head, 0)) 914 break; 915 } else { 916 if (amdgpu_ras_feature_enable(adev, &obj->head, 0)) 917 break; 918 } 919 } 920 921 return con->features; 922 } 923 924 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, 925 bool bypass) 926 { 927 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 928 int i; 929 const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE; 930 931 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { 932 struct ras_common_if head = { 933 .block = i, 934 .type = default_ras_type, 935 .sub_block_index = 0, 936 }; 937 938 if (i == AMDGPU_RAS_BLOCK__MCA) 939 continue; 940 941 if (bypass) { 942 /* 943 * bypass psp. vbios enable ras for us. 944 * so just create the obj 945 */ 946 if (__amdgpu_ras_feature_enable(adev, &head, 1)) 947 break; 948 } else { 949 if (amdgpu_ras_feature_enable(adev, &head, 1)) 950 break; 951 } 952 } 953 954 for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { 955 struct ras_common_if head = { 956 .block = AMDGPU_RAS_BLOCK__MCA, 957 .type = default_ras_type, 958 .sub_block_index = i, 959 }; 960 961 if (bypass) { 962 /* 963 * bypass psp. vbios enable ras for us. 
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
		enum amdgpu_ras_block block)
{
	if (!block_obj)
		return -EINVAL;

	if (block_obj->ras_comm.block == block)
		return 0;

	return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
					enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;

	if (block >= AMDGPU_RAS_BLOCK__LAST)
		return NULL;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_block_match) {
			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
				return obj;
		} else {
			if (amdgpu_ras_block_match_default(obj, block) == 0)
				return obj;
		}
	}

	return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/*
	 * choose the right query method according to
	 * whether the smu supports querying error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
	} else if (!ret) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address)
			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
	}
}

static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
					      struct ras_manager *ras_mgr,
					      struct ras_err_data *err_data,
					      struct ras_query_context *qctx,
					      const char *blk_name,
					      bool is_ue,
					      bool is_de)
{
	struct amdgpu_smuio_mcm_config_info *mcm_info;
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;
	u64 event_id = qctx->evid.event_id;

	if (is_ue) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			if (err_info->ue_count) {
				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
					      "%lld new uncorrectable hardware errors detected in %s block\n",
					      mcm_info->socket_id,
					      mcm_info->die_id,
					      err_info->ue_count,
					      blk_name);
			}
		}

		for_each_ras_error(err_node, &ras_mgr->err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
				      "%lld uncorrectable hardware errors detected in total in %s block\n",
				      mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
		}

	} else {
		if (is_de) {
			for_each_ras_error(err_node, err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				if (err_info->de_count) {
					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
						      "%lld new deferred hardware errors detected in %s block\n",
						      mcm_info->socket_id,
						      mcm_info->die_id,
						      err_info->de_count,
						      blk_name);
				}
			}

			for_each_ras_error(err_node, &ras_mgr->err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
					      "%lld deferred hardware errors detected in total in %s block\n",
					      mcm_info->socket_id, mcm_info->die_id,
					      err_info->de_count, blk_name);
			}
		} else {
			for_each_ras_error(err_node, err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				if (err_info->ce_count) {
					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
						      "%lld new correctable hardware errors detected in %s block\n",
						      mcm_info->socket_id,
						      mcm_info->die_id,
						      err_info->ce_count,
						      blk_name);
				}
			}

			for_each_ras_error(err_node, &ras_mgr->err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
					      "%lld correctable hardware errors detected in total in %s block\n",
					      mcm_info->socket_id, mcm_info->die_id,
					      err_info->ce_count, blk_name);
			}
		}
	}
}

static inline bool err_data_has_source_info(struct ras_err_data *data)
{
	return !list_empty(&data->err_node_list);
}

static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
					     struct ras_query_if *query_if,
					     struct ras_err_data *err_data,
					     struct ras_query_context *qctx)
{
	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
	const char *blk_name = get_ras_block_str(&query_if->head);
	u64 event_id = qctx->evid.event_id;

	if (err_data->ce_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
							  blk_name, false, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
				      "%ld correctable hardware errors "
				      "detected in %s block\n",
				      adev->smuio.funcs->get_socket_id(adev),
				      adev->smuio.funcs->get_die_id(adev),
				      ras_mgr->err_data.ce_count,
				      blk_name);
		} else {
			RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
				      "detected in %s block\n",
				      ras_mgr->err_data.ce_count,
				      blk_name);
		}
	}

	if (err_data->ue_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
							  blk_name, true, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
				      "%ld uncorrectable hardware errors "
				      "detected in %s block\n",
				      adev->smuio.funcs->get_socket_id(adev),
				      adev->smuio.funcs->get_die_id(adev),
				      ras_mgr->err_data.ue_count,
				      blk_name);
		} else {
			RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
				      "detected in %s block\n",
				      ras_mgr->err_data.ue_count,
				      blk_name);
		}
	}

	if (err_data->de_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
							  blk_name, false, true);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
				      "%ld deferred hardware errors "
				      "detected in %s block\n",
				      adev->smuio.funcs->get_socket_id(adev),
				      adev->smuio.funcs->get_die_id(adev),
				      ras_mgr->err_data.de_count,
				      blk_name);
		} else {
			RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
				      "detected in %s block\n",
				      ras_mgr->err_data.de_count,
				      blk_name);
		}
	}
}

static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
						  struct ras_query_if *query_if,
						  struct ras_err_data *err_data,
						  struct ras_query_context *qctx)
{
	unsigned long new_ue, new_ce, new_de;
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
	const char *blk_name = get_ras_block_str(&query_if->head);
	u64 event_id = qctx->evid.event_id;

	new_ce = err_data->ce_count - obj->err_data.ce_count;
	new_ue = err_data->ue_count - obj->err_data.ue_count;
	new_de = err_data->de_count - obj->err_data.de_count;

	if (new_ce) {
		RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
			      "detected in %s block\n",
			      new_ce,
			      blk_name);
	}

	if (new_ue) {
		RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
			      "detected in %s block\n",
			      new_ue,
			      blk_name);
	}

	if (new_de) {
		RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
			      "detected in %s block\n",
			      new_de,
			      blk_name);
	}
}

static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (err_data_has_source_info(err_data)) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			amdgpu_ras_error_statistic_de_count(&obj->err_data,
					&err_info->mcm_info, err_info->de_count);
			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
					&err_info->mcm_info, err_info->ce_count);
			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
					&err_info->mcm_info, err_info->ue_count);
		}
	} else {
		/* for the legacy asic path which doesn't have error source info */
		obj->err_data.ue_count += err_data->ue_count;
		obj->err_data.ce_count += err_data->ce_count;
		obj->err_data.de_count += err_data->de_count;
	}
}

static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
							     struct ras_err_data *err_data)
{
	/* Host reports absolute counts */
	obj->err_data.ue_count = err_data->ue_count;
	obj->err_data.ce_count = err_data->ce_count;
	obj->err_data.de_count = err_data->de_count;
}

static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
	struct ras_common_if head;

	memset(&head, 0, sizeof(head));
	head.block = blk;

	return amdgpu_ras_find_obj(adev, &head);
}

int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
			const struct aca_info *aca_info, void *data)
{
	struct ras_manager *obj;

	/* in resume phase, no need to create aca fs node */
	if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
		return 0;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
}

int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	amdgpu_aca_remove_handle(&obj->aca_handle);

	return 0;
}

static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
					 enum aca_error_type type, struct ras_err_data *err_data,
					 struct ras_query_context *qctx)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
}

ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
				  struct aca_handle *handle, char *buf, void *data)
{
	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count, "de", info.de_count);
}

static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
						struct ras_query_if *info,
						struct ras_err_data *err_data,
						struct ras_query_context *qctx,
						unsigned int error_query_mode)
{
	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
	struct amdgpu_ras_block_object *block_obj = NULL;
	int ret;

	if (blk == AMDGPU_RAS_BLOCK_COUNT)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
		return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
	} else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
			amdgpu_ras_get_ecc_info(adev, err_data);
		} else {
			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
			if (!block_obj || !block_obj->hw_ops) {
				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
					     get_ras_block_str(&info->head));
				return -EINVAL;
			}

			if (block_obj->hw_ops->query_ras_error_count)
				block_obj->hw_ops->query_ras_error_count(adev, err_data);

			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
				if (block_obj->hw_ops->query_ras_error_status)
					block_obj->hw_ops->query_ras_error_status(adev);
			}
		}
	} else {
		if (amdgpu_aca_is_enabled(adev)) {
			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
			if (ret)
				return ret;

			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
			if (ret)
				return ret;

			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
			if (ret)
				return ret;
		} else {
			/* FIXME: add code to check return value later */
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
		}
	}

	return 0;
}

/* query/inject/cure begin */
static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
						    struct ras_query_if *info,
						    enum ras_event_type type)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data;
	struct ras_query_context qctx;
	unsigned int error_query_mode;
	int ret;

	if (!obj)
		return -EINVAL;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
		return -EINVAL;

	memset(&qctx, 0, sizeof(qctx));
	qctx.evid.type = type;
	qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);

	if (!down_read_trylock(&adev->reset_domain->sem)) {
		ret = -EIO;
		goto out_fini_err_data;
	}

	ret = amdgpu_ras_query_error_status_helper(adev, info,
						   &err_data,
						   &qctx,
						   error_query_mode);
	up_read(&adev->reset_domain->sem);
	if (ret)
		goto out_fini_err_data;

	if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
		amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
		amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
	} else {
		/* Host provides absolute error counts. First generate the report
		 * using the previous VF internal count against the new host count.
		 * Then update the VF internal count.
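		 * For example: if the cached VF count is ce_count == 3 and the
		 * host now reports 5, two new correctable errors are reported
		 * before the cached count is updated to 5.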
		 */
		amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
		amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
	}

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;
	info->de_count = obj->err_data.de_count;

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}

int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
	return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
}

int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     ras_block_str(block));
		return -EOPNOTSUPP;
	}

	if (!amdgpu_ras_is_supported(adev, block) ||
	    !amdgpu_ras_get_aca_debug_mode(adev))
		return -EOPNOTSUPP;

	/* skip ras error reset in gpu reset */
	if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
	    ((smu_funcs && smu_funcs->set_debug_mode) ||
	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
		return -EOPNOTSUPP;

	if (block_obj->hw_ops->reset_ras_error_count)
		block_obj->hw_ops->reset_ras_error_count(adev);

	return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
		return 0;

	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
		if (block_obj->hw_ops->reset_ras_error_status)
			block_obj->hw_ops->reset_ras_error_status(adev);
	}

	return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = -EINVAL;
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
							info->head.block,
							info->head.sub_block_index);

	/* inject on guest isn't allowed, return success directly */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!obj)
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return -EINVAL;
	}

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	if (block_obj->hw_ops->ras_error_inject) {
		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
		else /* Special ras_error_inject is defined (e.g: xgmi) */
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
								  info->instance_mask);
	} else {
		/* default path */
		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);

	return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 for query success or do nothing, otherwise return an error
 * on failures
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	if (!query_info)
		/* do nothing if query_info is not specified */
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	*ce_count += query_info->ce_count;
	*ue_count += query_info->ue_count;

	/* some hardware/IP supports read to clear;
	 * no need to explicitly reset the err status after the query call */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
	}

	return 0;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for
 * specific ip block; if info is NULL, then the query request is for
 * all the ip blocks that support query ras error counters/status
 *
 * If set, @ce_count or @ue_count, count and return the corresponding
 * error counts in those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
				 unsigned long *ce_count,
				 unsigned long *ue_count,
				 struct ras_query_if *query_info)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	unsigned long ce, ue;
	int ret;

	if (!adev->ras_enabled || !con)
		return -EOPNOTSUPP;

	/* Don't count since no reporting.
1653 */ 1654 if (!ce_count && !ue_count) 1655 return 0; 1656 1657 ce = 0; 1658 ue = 0; 1659 if (!query_info) { 1660 /* query all the ip blocks that support ras query interface */ 1661 list_for_each_entry(obj, &con->head, node) { 1662 struct ras_query_if info = { 1663 .head = obj->head, 1664 }; 1665 1666 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info); 1667 } 1668 } else { 1669 /* query specific ip block */ 1670 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info); 1671 } 1672 1673 if (ret) 1674 return ret; 1675 1676 if (ce_count) 1677 *ce_count = ce; 1678 1679 if (ue_count) 1680 *ue_count = ue; 1681 1682 return 0; 1683 } 1684 /* query/inject/cure end */ 1685 1686 1687 /* sysfs begin */ 1688 1689 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, 1690 struct ras_badpage **bps, unsigned int *count); 1691 1692 static char *amdgpu_ras_badpage_flags_str(unsigned int flags) 1693 { 1694 switch (flags) { 1695 case AMDGPU_RAS_RETIRE_PAGE_RESERVED: 1696 return "R"; 1697 case AMDGPU_RAS_RETIRE_PAGE_PENDING: 1698 return "P"; 1699 case AMDGPU_RAS_RETIRE_PAGE_FAULT: 1700 default: 1701 return "F"; 1702 } 1703 } 1704 1705 /** 1706 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface 1707 * 1708 * It allows user to read the bad pages of vram on the gpu through 1709 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages 1710 * 1711 * It outputs multiple lines, and each line stands for one gpu page. 1712 * 1713 * The format of one line is below, 1714 * gpu pfn : gpu page size : flags 1715 * 1716 * gpu pfn and gpu page size are printed in hex format. 1717 * flags can be one of below character, 1718 * 1719 * R: reserved, this gpu page is reserved and not able to use. 1720 * 1721 * P: pending for reserve, this gpu page is marked as bad, will be reserved 1722 * in next window of page_reserve. 1723 * 1724 * F: unable to reserve. this gpu page can't be reserved due to some reasons. 1725 * 1726 * Examples: 1727 * 1728 * .. 
code-block:: bash 1729 * 1730 * 0x00000001 : 0x00001000 : R 1731 * 0x00000002 : 0x00001000 : P 1732 * 1733 */ 1734 1735 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, 1736 struct kobject *kobj, struct bin_attribute *attr, 1737 char *buf, loff_t ppos, size_t count) 1738 { 1739 struct amdgpu_ras *con = 1740 container_of(attr, struct amdgpu_ras, badpages_attr); 1741 struct amdgpu_device *adev = con->adev; 1742 const unsigned int element_size = 1743 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1; 1744 unsigned int start = div64_ul(ppos + element_size - 1, element_size); 1745 unsigned int end = div64_ul(ppos + count - 1, element_size); 1746 ssize_t s = 0; 1747 struct ras_badpage *bps = NULL; 1748 unsigned int bps_count = 0; 1749 1750 memset(buf, 0, count); 1751 1752 if (amdgpu_ras_badpages_read(adev, &bps, &bps_count)) 1753 return 0; 1754 1755 for (; start < end && start < bps_count; start++) 1756 s += scnprintf(&buf[s], element_size + 1, 1757 "0x%08x : 0x%08x : %1s\n", 1758 bps[start].bp, 1759 bps[start].size, 1760 amdgpu_ras_badpage_flags_str(bps[start].flags)); 1761 1762 kfree(bps); 1763 1764 return s; 1765 } 1766 1767 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, 1768 struct device_attribute *attr, char *buf) 1769 { 1770 struct amdgpu_ras *con = 1771 container_of(attr, struct amdgpu_ras, features_attr); 1772 1773 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); 1774 } 1775 1776 static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev, 1777 struct device_attribute *attr, char *buf) 1778 { 1779 struct amdgpu_ras *con = 1780 container_of(attr, struct amdgpu_ras, version_attr); 1781 return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version); 1782 } 1783 1784 static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev, 1785 struct device_attribute *attr, char *buf) 1786 { 1787 struct amdgpu_ras *con = 1788 container_of(attr, struct amdgpu_ras, schema_attr); 1789 return sysfs_emit(buf, "schema: 0x%x\n", con->schema); 1790 } 1791 1792 static struct { 1793 enum ras_event_type type; 1794 const char *name; 1795 } dump_event[] = { 1796 {RAS_EVENT_TYPE_FATAL, "Fatal Error"}, 1797 {RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"}, 1798 {RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"}, 1799 }; 1800 1801 static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev, 1802 struct device_attribute *attr, char *buf) 1803 { 1804 struct amdgpu_ras *con = 1805 container_of(attr, struct amdgpu_ras, event_state_attr); 1806 struct ras_event_manager *event_mgr = con->event_mgr; 1807 struct ras_event_state *event_state; 1808 int i, size = 0; 1809 1810 if (!event_mgr) 1811 return -EINVAL; 1812 1813 size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno)); 1814 for (i = 0; i < ARRAY_SIZE(dump_event); i++) { 1815 event_state = &event_mgr->event_state[dump_event[i].type]; 1816 size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n", 1817 dump_event[i].name, 1818 atomic64_read(&event_state->count), 1819 event_state->last_seqno); 1820 } 1821 1822 return (ssize_t)size; 1823 } 1824 1825 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) 1826 { 1827 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1828 1829 if (adev->dev->kobj.sd) 1830 sysfs_remove_file_from_group(&adev->dev->kobj, 1831 &con->badpages_attr.attr, 1832 RAS_FS_NAME); 1833 } 1834 1835 static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev) 1836 { 1837 struct 
amdgpu_ras *con = amdgpu_ras_get_context(adev); 1838 struct attribute *attrs[] = { 1839 &con->features_attr.attr, 1840 &con->version_attr.attr, 1841 &con->schema_attr.attr, 1842 &con->event_state_attr.attr, 1843 NULL 1844 }; 1845 struct attribute_group group = { 1846 .name = RAS_FS_NAME, 1847 .attrs = attrs, 1848 }; 1849 1850 if (adev->dev->kobj.sd) 1851 sysfs_remove_group(&adev->dev->kobj, &group); 1852 1853 return 0; 1854 } 1855 1856 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, 1857 struct ras_common_if *head) 1858 { 1859 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 1860 1861 if (amdgpu_aca_is_enabled(adev)) 1862 return 0; 1863 1864 if (!obj || obj->attr_inuse) 1865 return -EINVAL; 1866 1867 if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block)) 1868 return 0; 1869 1870 get_obj(obj); 1871 1872 snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name), 1873 "%s_err_count", head->name); 1874 1875 obj->sysfs_attr = (struct device_attribute){ 1876 .attr = { 1877 .name = obj->fs_data.sysfs_name, 1878 .mode = S_IRUGO, 1879 }, 1880 .show = amdgpu_ras_sysfs_read, 1881 }; 1882 sysfs_attr_init(&obj->sysfs_attr.attr); 1883 1884 if (sysfs_add_file_to_group(&adev->dev->kobj, 1885 &obj->sysfs_attr.attr, 1886 RAS_FS_NAME)) { 1887 put_obj(obj); 1888 return -EINVAL; 1889 } 1890 1891 obj->attr_inuse = 1; 1892 1893 return 0; 1894 } 1895 1896 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, 1897 struct ras_common_if *head) 1898 { 1899 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 1900 1901 if (amdgpu_aca_is_enabled(adev)) 1902 return 0; 1903 1904 if (!obj || !obj->attr_inuse) 1905 return -EINVAL; 1906 1907 if (adev->dev->kobj.sd) 1908 sysfs_remove_file_from_group(&adev->dev->kobj, 1909 &obj->sysfs_attr.attr, 1910 RAS_FS_NAME); 1911 obj->attr_inuse = 0; 1912 put_obj(obj); 1913 1914 return 0; 1915 } 1916 1917 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) 1918 { 1919 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1920 struct ras_manager *obj, *tmp; 1921 1922 list_for_each_entry_safe(obj, tmp, &con->head, node) { 1923 amdgpu_ras_sysfs_remove(adev, &obj->head); 1924 } 1925 1926 if (amdgpu_bad_page_threshold != 0) 1927 amdgpu_ras_sysfs_remove_bad_page_node(adev); 1928 1929 amdgpu_ras_sysfs_remove_dev_attr_node(adev); 1930 1931 return 0; 1932 } 1933 /* sysfs end */ 1934 1935 /** 1936 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors 1937 * 1938 * Normally when there is an uncorrectable error, the driver will reset 1939 * the GPU to recover. However, in the event of an unrecoverable error, 1940 * the driver provides an interface to reboot the system automatically 1941 * in that event. 1942 * 1943 * The following file in debugfs provides that interface: 1944 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot 1945 * 1946 * Usage: 1947 * 1948 * .. 
code-block:: bash
1949 *
1950 *	echo true > .../ras/auto_reboot
1951 *
1952 */
1953 /* debugfs begin */
1954 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1955 {
1956 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1957 	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1958 	struct drm_minor *minor = adev_to_drm(adev)->primary;
1959 	struct dentry *dir;
1960
1961 	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1962 	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1963 			    &amdgpu_ras_debugfs_ctrl_ops);
1964 	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1965 			    &amdgpu_ras_debugfs_eeprom_ops);
1966 	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1967 			   &con->bad_page_cnt_threshold);
1968 	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1969 	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1970 	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1971 	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1972 			    &amdgpu_ras_debugfs_eeprom_size_ops);
1973 	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1974 						       S_IRUGO, dir, adev,
1975 						       &amdgpu_ras_debugfs_eeprom_table_ops);
1976 	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1977
1978 	/*
1979 	 * After an uncorrectable error happens, GPU recovery is usually
1980 	 * scheduled. But due to the known problem of GPU recovery failing
1981 	 * to bring the GPU back, the interface below gives the user a
1982 	 * direct way to reboot the system automatically when an
1983 	 * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
1984 	 * GPU recovery routine will never be called.
1985 	 */
1986 	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1987
1988 	/*
1989 	 * The user can set this so that the hardware's error count registers
1990 	 * of RAS IPs are not cleaned up during ras recovery.
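	 * For example (the DRI minor index 0 is illustrative; it matches the
	 * ras_eeprom_reset example earlier in this file):
	 *   echo 1 > /sys/kernel/debug/dri/0/ras/disable_ras_err_cnt_harvest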
1991 */ 1992 debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir, 1993 &con->disable_ras_err_cnt_harvest); 1994 return dir; 1995 } 1996 1997 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, 1998 struct ras_fs_if *head, 1999 struct dentry *dir) 2000 { 2001 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); 2002 2003 if (!obj || !dir) 2004 return; 2005 2006 get_obj(obj); 2007 2008 memcpy(obj->fs_data.debugfs_name, 2009 head->debugfs_name, 2010 sizeof(obj->fs_data.debugfs_name)); 2011 2012 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir, 2013 obj, &amdgpu_ras_debugfs_ops); 2014 } 2015 2016 static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev) 2017 { 2018 bool ret; 2019 2020 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 2021 case IP_VERSION(13, 0, 6): 2022 case IP_VERSION(13, 0, 12): 2023 case IP_VERSION(13, 0, 14): 2024 ret = true; 2025 break; 2026 default: 2027 ret = false; 2028 break; 2029 } 2030 2031 return ret; 2032 } 2033 2034 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) 2035 { 2036 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2037 struct dentry *dir; 2038 struct ras_manager *obj; 2039 struct ras_fs_if fs_info; 2040 2041 /* 2042 * it won't be called in resume path, no need to check 2043 * suspend and gpu reset status 2044 */ 2045 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) 2046 return; 2047 2048 dir = amdgpu_ras_debugfs_create_ctrl_node(adev); 2049 2050 list_for_each_entry(obj, &con->head, node) { 2051 if (amdgpu_ras_is_supported(adev, obj->head.block) && 2052 (obj->attr_inuse == 1)) { 2053 sprintf(fs_info.debugfs_name, "%s_err_inject", 2054 get_ras_block_str(&obj->head)); 2055 fs_info.head = obj->head; 2056 amdgpu_ras_debugfs_create(adev, &fs_info, dir); 2057 } 2058 } 2059 2060 if (amdgpu_ras_aca_is_supported(adev)) { 2061 if (amdgpu_aca_is_enabled(adev)) 2062 amdgpu_aca_smu_debugfs_init(adev, dir); 2063 else 2064 amdgpu_mca_smu_debugfs_init(adev, dir); 2065 } 2066 } 2067 2068 /* debugfs end */ 2069 2070 /* ras fs */ 2071 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO, 2072 amdgpu_ras_sysfs_badpages_read, NULL, 0); 2073 static DEVICE_ATTR(features, S_IRUGO, 2074 amdgpu_ras_sysfs_features_read, NULL); 2075 static DEVICE_ATTR(version, 0444, 2076 amdgpu_ras_sysfs_version_show, NULL); 2077 static DEVICE_ATTR(schema, 0444, 2078 amdgpu_ras_sysfs_schema_show, NULL); 2079 static DEVICE_ATTR(event_state, 0444, 2080 amdgpu_ras_sysfs_event_state_show, NULL); 2081 static int amdgpu_ras_fs_init(struct amdgpu_device *adev) 2082 { 2083 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2084 struct attribute_group group = { 2085 .name = RAS_FS_NAME, 2086 }; 2087 struct attribute *attrs[] = { 2088 &con->features_attr.attr, 2089 &con->version_attr.attr, 2090 &con->schema_attr.attr, 2091 &con->event_state_attr.attr, 2092 NULL 2093 }; 2094 struct bin_attribute *bin_attrs[] = { 2095 NULL, 2096 NULL, 2097 }; 2098 int r; 2099 2100 group.attrs = attrs; 2101 2102 /* add features entry */ 2103 con->features_attr = dev_attr_features; 2104 sysfs_attr_init(attrs[0]); 2105 2106 /* add version entry */ 2107 con->version_attr = dev_attr_version; 2108 sysfs_attr_init(attrs[1]); 2109 2110 /* add schema entry */ 2111 con->schema_attr = dev_attr_schema; 2112 sysfs_attr_init(attrs[2]); 2113 2114 /* add event_state entry */ 2115 con->event_state_attr = dev_attr_event_state; 2116 sysfs_attr_init(attrs[3]); 2117 2118 if (amdgpu_bad_page_threshold != 0) { 2119 /* add bad_page_features entry */ 2120 
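		/* The bad-page list is exposed as a binary sysfs file; each
		 * record is rendered by amdgpu_ras_sysfs_badpages_read() as
		 * "0x<page> : 0x<size> : <flag>", as documented in the
		 * gpu_vram_bad_pages section above.
		 */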
bin_attr_gpu_vram_bad_pages.private = NULL;
2121 		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
2122 		bin_attrs[0] = &con->badpages_attr;
2123 		group.bin_attrs = bin_attrs;
2124 		sysfs_bin_attr_init(bin_attrs[0]);
2125 	}
2126
2127 	r = sysfs_create_group(&adev->dev->kobj, &group);
2128 	if (r)
2129 		dev_err(adev->dev, "Failed to create RAS sysfs group!");
2130
2131 	return 0;
2132 }
2133
2134 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2135 {
2136 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2137 	struct ras_manager *con_obj, *ip_obj, *tmp;
2138
2139 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2140 		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2141 			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2142 			if (ip_obj)
2143 				put_obj(ip_obj);
2144 		}
2145 	}
2146
2147 	amdgpu_ras_sysfs_remove_all(adev);
2148 	return 0;
2149 }
2150 /* ras fs end */
2151
2152 /* ih begin */
2153
2154 /* For hardware that cannot enable the bif ring for both the ras_controller_irq
2155  * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
2156  * register to check whether the interrupt is triggered or not, and properly
2157  * ack the interrupt if it is there.
2158  */
2159 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2160 {
2161 	/* Fatal error events are handled on the host side */
2162 	if (amdgpu_sriov_vf(adev))
2163 		return;
2164 	/*
2165 	 * If the current interrupt is caused by a non-fatal RAS error, skip
2166 	 * the check for a fatal error. For fatal errors, the FED status of all
2167 	 * devices in the XGMI hive gets set when the first device gets the
2168 	 * fatal error interrupt. The error gets propagated to other devices as
2169 	 * well, so make sure to ack the interrupt regardless of FED status.
2170 	 */
2171 	if (!amdgpu_ras_get_fed_status(adev) &&
2172 	    amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
2173 		return;
2174
2175 	if (adev->nbio.ras &&
2176 	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2177 		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2178
2179 	if (adev->nbio.ras &&
2180 	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2181 		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2182 }
2183
2184 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2185 				struct amdgpu_iv_entry *entry)
2186 {
2187 	bool poison_stat = false;
2188 	struct amdgpu_device *adev = obj->adev;
2189 	struct amdgpu_ras_block_object *block_obj =
2190 		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2191 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2192 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
2193 	u64 event_id;
2194 	int ret;
2195
2196 	if (!block_obj || !con)
2197 		return;
2198
2199 	ret = amdgpu_ras_mark_ras_event(adev, type);
2200 	if (ret)
2201 		return;
2202
2203 	amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
2204 	/* both query_poison_status and handle_poison_consumption are optional,
2205 	 * but at least one of them should be implemented if we need a poison
2206 	 * consumption handler
2207 	 */
2208 	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2209 		poison_stat = block_obj->hw_ops->query_poison_status(adev);
2210 		if (!poison_stat) {
2211 			/* Not a poison consumption interrupt, no need to handle it */
2212 			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2213 				 block_obj->ras_comm.name);
2214
2215 			return;
2216 		}
2217 	}
2218
2219 	amdgpu_umc_poison_handler(adev, obj->head.block, 0);
2220
2221 	if (block_obj->hw_ops &&
block_obj->hw_ops->handle_poison_consumption)
2222 		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2223
2224 	/* gpu reset is the fallback for failed and default cases.
2225 	 * For the RMA case, amdgpu_umc_poison_handler will handle the gpu reset.
2226 	 */
2227 	if (poison_stat && !amdgpu_ras_is_rma(adev)) {
2228 		event_id = amdgpu_ras_acquire_event_id(adev, type);
2229 		RAS_EVENT_LOG(adev, event_id,
2230 			      "GPU reset for %s RAS poison consumption is issued!\n",
2231 			      block_obj->ras_comm.name);
2232 		amdgpu_ras_reset_gpu(adev);
2233 	}
2234
2235 	if (!poison_stat)
2236 		amdgpu_gfx_poison_consumption_handler(adev, entry);
2237 }
2238
2239 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2240 				struct amdgpu_iv_entry *entry)
2241 {
2242 	struct amdgpu_device *adev = obj->adev;
2243 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2244 	u64 event_id;
2245 	int ret;
2246
2247 	ret = amdgpu_ras_mark_ras_event(adev, type);
2248 	if (ret)
2249 		return;
2250
2251 	event_id = amdgpu_ras_acquire_event_id(adev, type);
2252 	RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
2253
2254 	if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2255 		struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2256
2257 		atomic_inc(&con->page_retirement_req_cnt);
2258 		atomic_inc(&con->poison_creation_count);
2259
2260 		wake_up(&con->page_retirement_wq);
2261 	}
2262 }
2263
2264 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2265 				struct amdgpu_iv_entry *entry)
2266 {
2267 	struct ras_ih_data *data = &obj->ih_data;
2268 	struct ras_err_data err_data;
2269 	int ret;
2270
2271 	if (!data->cb)
2272 		return;
2273
2274 	ret = amdgpu_ras_error_data_init(&err_data);
2275 	if (ret)
2276 		return;
2277
2278 	/* Let the IP handle its data; maybe we need to get the output
2279 	 * from the callback to update the error type/count, etc.
2280 	 */
2281 	amdgpu_ras_set_fed(obj->adev, true);
2282 	ret = data->cb(obj->adev, &err_data, entry);
2283 	/* A ue will trigger an interrupt, and in that case
2284 	 * we need to do a reset to recover the whole system.
2285 	 * But we leave the IP to do that recovery; here we just
2286 	 * dispatch the error.
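	 * (the ue/ce/de counters accumulated below are the uncorrectable,
	 * correctable and deferred error counts reported by the callback)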
2287 */ 2288 if (ret == AMDGPU_RAS_SUCCESS) { 2289 /* these counts could be left as 0 if 2290 * some blocks do not count error number 2291 */ 2292 obj->err_data.ue_count += err_data.ue_count; 2293 obj->err_data.ce_count += err_data.ce_count; 2294 obj->err_data.de_count += err_data.de_count; 2295 } 2296 2297 amdgpu_ras_error_data_fini(&err_data); 2298 } 2299 2300 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) 2301 { 2302 struct ras_ih_data *data = &obj->ih_data; 2303 struct amdgpu_iv_entry entry; 2304 2305 while (data->rptr != data->wptr) { 2306 rmb(); 2307 memcpy(&entry, &data->ring[data->rptr], 2308 data->element_size); 2309 2310 wmb(); 2311 data->rptr = (data->aligned_element_size + 2312 data->rptr) % data->ring_size; 2313 2314 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) { 2315 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC) 2316 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry); 2317 else 2318 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry); 2319 } else { 2320 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC) 2321 amdgpu_ras_interrupt_umc_handler(obj, &entry); 2322 else 2323 dev_warn(obj->adev->dev, 2324 "No RAS interrupt handler for non-UMC block with poison disabled.\n"); 2325 } 2326 } 2327 } 2328 2329 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) 2330 { 2331 struct ras_ih_data *data = 2332 container_of(work, struct ras_ih_data, ih_work); 2333 struct ras_manager *obj = 2334 container_of(data, struct ras_manager, ih_data); 2335 2336 amdgpu_ras_interrupt_handler(obj); 2337 } 2338 2339 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, 2340 struct ras_dispatch_if *info) 2341 { 2342 struct ras_manager *obj; 2343 struct ras_ih_data *data; 2344 2345 obj = amdgpu_ras_find_obj(adev, &info->head); 2346 if (!obj) 2347 return -EINVAL; 2348 2349 data = &obj->ih_data; 2350 2351 if (data->inuse == 0) 2352 return 0; 2353 2354 /* Might be overflow... 
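	 * The 64-entry IH ring below is written without checking the read
	 * pointer, so unconsumed entries can be overwritten under load.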
*/
2355 	memcpy(&data->ring[data->wptr], info->entry,
2356 			data->element_size);
2357
2358 	wmb();
2359 	data->wptr = (data->aligned_element_size +
2360 			data->wptr) % data->ring_size;
2361
2362 	schedule_work(&data->ih_work);
2363
2364 	return 0;
2365 }
2366
2367 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2368 		struct ras_common_if *head)
2369 {
2370 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2371 	struct ras_ih_data *data;
2372
2373 	if (!obj)
2374 		return -EINVAL;
2375
2376 	data = &obj->ih_data;
2377 	if (data->inuse == 0)
2378 		return 0;
2379
2380 	cancel_work_sync(&data->ih_work);
2381
2382 	kfree(data->ring);
2383 	memset(data, 0, sizeof(*data));
2384 	put_obj(obj);
2385
2386 	return 0;
2387 }
2388
2389 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2390 		struct ras_common_if *head)
2391 {
2392 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2393 	struct ras_ih_data *data;
2394 	struct amdgpu_ras_block_object *ras_obj;
2395
2396 	if (!obj) {
2397 		/* in case we register the IH before enabling the ras feature */
2398 		obj = amdgpu_ras_create_obj(adev, head);
2399 		if (!obj)
2400 			return -EINVAL;
2401 	} else
2402 		get_obj(obj);
2403
2404 	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2405
2406 	data = &obj->ih_data;
2407 	/* set up the callback etc. */
2408 	*data = (struct ras_ih_data) {
2409 		.inuse = 0,
2410 		.cb = ras_obj->ras_cb,
2411 		.element_size = sizeof(struct amdgpu_iv_entry),
2412 		.rptr = 0,
2413 		.wptr = 0,
2414 	};
2415
2416 	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2417
2418 	data->aligned_element_size = ALIGN(data->element_size, 8);
2419 	/* the ring can store 64 iv entries. */
2420 	data->ring_size = 64 * data->aligned_element_size;
2421 	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2422 	if (!data->ring) {
2423 		put_obj(obj);
2424 		return -ENOMEM;
2425 	}
2426
2427 	/* IH is ready */
2428 	data->inuse = 1;
2429
2430 	return 0;
2431 }
2432
2433 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2434 {
2435 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2436 	struct ras_manager *obj, *tmp;
2437
2438 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
2439 		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2440 	}
2441
2442 	return 0;
2443 }
2444 /* ih end */
2445
2446 /* traverse all IPs except NBIO to query error counters */
2447 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
2448 {
2449 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2450 	struct ras_manager *obj;
2451
2452 	if (!adev->ras_enabled || !con)
2453 		return;
2454
2455 	list_for_each_entry(obj, &con->head, node) {
2456 		struct ras_query_if info = {
2457 			.head = obj->head,
2458 		};
2459
2460 		/*
2461 		 * The PCIE_BIF IP has a separate isr driven by the ras
2462 		 * controller interrupt; the specific ras counter query is
2463 		 * done in that isr. So skip such blocks in the common
2464 		 * sync flood interrupt isr.
2465 		 */
2466 		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2467 			continue;
2468
2469 		/*
2470 		 * this is a workaround for aldebaran: skip sending the msg
2471 		 * to smu to get the ecc_info table, because smu handling of
2472 		 * the ecc_info table query fails temporarily.
2473 		 * It should be removed once smu fixes the ecc_info table handling.
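		 * (the MP1 v13.0.2 check below identifies aldebaran)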
2474 		 */
2475 		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2476 		    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2477 		     IP_VERSION(13, 0, 2)))
2478 			continue;
2479
2480 		amdgpu_ras_query_error_status_with_event(adev, &info, type);
2481
2482 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2483 		    IP_VERSION(11, 0, 2) &&
2484 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2485 		    IP_VERSION(11, 0, 4) &&
2486 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2487 		    IP_VERSION(13, 0, 0)) {
2488 			if (amdgpu_ras_reset_error_status(adev, info.head.block))
2489 				dev_warn(adev->dev, "Failed to reset error counter and error status");
2490 		}
2491 	}
2492 }
2493
2494 /* Parse RdRspStatus and WrRspStatus */
2495 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2496 		struct ras_query_if *info)
2497 {
2498 	struct amdgpu_ras_block_object *block_obj;
2499 	/*
2500 	 * Only two blocks need to query the read/write
2501 	 * RspStatus at the current state
2502 	 */
2503 	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2504 	    (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2505 		return;
2506
2507 	block_obj = amdgpu_ras_get_ras_block(adev,
2508 					     info->head.block,
2509 					     info->head.sub_block_index);
2510
2511 	if (!block_obj || !block_obj->hw_ops) {
2512 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2513 			     get_ras_block_str(&info->head));
2514 		return;
2515 	}
2516
2517 	if (block_obj->hw_ops->query_ras_error_status)
2518 		block_obj->hw_ops->query_ras_error_status(adev);
2519
2520 }
2521
2522 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2523 {
2524 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2525 	struct ras_manager *obj;
2526
2527 	if (!adev->ras_enabled || !con)
2528 		return;
2529
2530 	list_for_each_entry(obj, &con->head, node) {
2531 		struct ras_query_if info = {
2532 			.head = obj->head,
2533 		};
2534
2535 		amdgpu_ras_error_status_query(adev, &info);
2536 	}
2537 }
2538
2539 /* recovery begin */
2540
2541 /* return 0 on success.
2542  * the caller needs to free bps.
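 * On success, *bps points to a kmalloc'ed array of *count entries.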
2543 */ 2544 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, 2545 struct ras_badpage **bps, unsigned int *count) 2546 { 2547 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2548 struct ras_err_handler_data *data; 2549 int i = 0; 2550 int ret = 0, status; 2551 2552 if (!con || !con->eh_data || !bps || !count) 2553 return -EINVAL; 2554 2555 mutex_lock(&con->recovery_lock); 2556 data = con->eh_data; 2557 if (!data || data->count == 0) { 2558 *bps = NULL; 2559 ret = -EINVAL; 2560 goto out; 2561 } 2562 2563 *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL); 2564 if (!*bps) { 2565 ret = -ENOMEM; 2566 goto out; 2567 } 2568 2569 for (; i < data->count; i++) { 2570 (*bps)[i] = (struct ras_badpage){ 2571 .bp = data->bps[i].retired_page, 2572 .size = AMDGPU_GPU_PAGE_SIZE, 2573 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, 2574 }; 2575 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, 2576 data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT); 2577 if (status == -EBUSY) 2578 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; 2579 else if (status == -ENOENT) 2580 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; 2581 } 2582 2583 *count = data->count; 2584 out: 2585 mutex_unlock(&con->recovery_lock); 2586 return ret; 2587 } 2588 2589 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev, 2590 struct amdgpu_hive_info *hive, bool status) 2591 { 2592 struct amdgpu_device *tmp_adev; 2593 2594 if (hive) { 2595 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) 2596 amdgpu_ras_set_fed(tmp_adev, status); 2597 } else { 2598 amdgpu_ras_set_fed(adev, status); 2599 } 2600 } 2601 2602 bool amdgpu_ras_in_recovery(struct amdgpu_device *adev) 2603 { 2604 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 2605 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 2606 int hive_ras_recovery = 0; 2607 2608 if (hive) { 2609 hive_ras_recovery = atomic_read(&hive->ras_recovery); 2610 amdgpu_put_xgmi_hive(hive); 2611 } 2612 2613 if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) 2614 return true; 2615 2616 return false; 2617 } 2618 2619 static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev) 2620 { 2621 if (amdgpu_ras_intr_triggered()) 2622 return RAS_EVENT_TYPE_FATAL; 2623 else 2624 return RAS_EVENT_TYPE_POISON_CONSUMPTION; 2625 } 2626 2627 static void amdgpu_ras_do_recovery(struct work_struct *work) 2628 { 2629 struct amdgpu_ras *ras = 2630 container_of(work, struct amdgpu_ras, recovery_work); 2631 struct amdgpu_device *remote_adev = NULL; 2632 struct amdgpu_device *adev = ras->adev; 2633 struct list_head device_list, *device_list_handle = NULL; 2634 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 2635 enum ras_event_type type; 2636 2637 if (hive) { 2638 atomic_set(&hive->ras_recovery, 1); 2639 2640 /* If any device which is part of the hive received RAS fatal 2641 * error interrupt, set fatal error status on all. This 2642 * condition will need a recovery, and flag will be cleared 2643 * as part of recovery. 
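		 * The flag is set on every device in the hive via
		 * amdgpu_ras_set_fed_all() below.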
2644 */ 2645 list_for_each_entry(remote_adev, &hive->device_list, 2646 gmc.xgmi.head) 2647 if (amdgpu_ras_get_fed_status(remote_adev)) { 2648 amdgpu_ras_set_fed_all(adev, hive, true); 2649 break; 2650 } 2651 } 2652 if (!ras->disable_ras_err_cnt_harvest) { 2653 2654 /* Build list of devices to query RAS related errors */ 2655 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) { 2656 device_list_handle = &hive->device_list; 2657 } else { 2658 INIT_LIST_HEAD(&device_list); 2659 list_add_tail(&adev->gmc.xgmi.head, &device_list); 2660 device_list_handle = &device_list; 2661 } 2662 2663 type = amdgpu_ras_get_fatal_error_event(adev); 2664 list_for_each_entry(remote_adev, 2665 device_list_handle, gmc.xgmi.head) { 2666 amdgpu_ras_query_err_status(remote_adev); 2667 amdgpu_ras_log_on_err_counter(remote_adev, type); 2668 } 2669 2670 } 2671 2672 if (amdgpu_device_should_recover_gpu(ras->adev)) { 2673 struct amdgpu_reset_context reset_context; 2674 memset(&reset_context, 0, sizeof(reset_context)); 2675 2676 reset_context.method = AMD_RESET_METHOD_NONE; 2677 reset_context.reset_req_dev = adev; 2678 reset_context.src = AMDGPU_RESET_SRC_RAS; 2679 set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); 2680 2681 /* Perform full reset in fatal error mode */ 2682 if (!amdgpu_ras_is_poison_mode_supported(ras->adev)) 2683 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 2684 else { 2685 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 2686 2687 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) { 2688 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET; 2689 reset_context.method = AMD_RESET_METHOD_MODE2; 2690 } 2691 2692 /* Fatal error occurs in poison mode, mode1 reset is used to 2693 * recover gpu. 2694 */ 2695 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) { 2696 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET; 2697 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 2698 2699 psp_fatal_error_recovery_quirk(&adev->psp); 2700 } 2701 } 2702 2703 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context); 2704 } 2705 atomic_set(&ras->in_recovery, 0); 2706 if (hive) { 2707 atomic_set(&hive->ras_recovery, 0); 2708 amdgpu_put_xgmi_hive(hive); 2709 } 2710 } 2711 2712 /* alloc/realloc bps array */ 2713 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, 2714 struct ras_err_handler_data *data, int pages) 2715 { 2716 unsigned int old_space = data->count + data->space_left; 2717 unsigned int new_space = old_space + pages; 2718 unsigned int align_space = ALIGN(new_space, 512); 2719 void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); 2720 2721 if (!bps) { 2722 return -ENOMEM; 2723 } 2724 2725 if (data->bps) { 2726 memcpy(bps, data->bps, 2727 data->count * sizeof(*data->bps)); 2728 kfree(data->bps); 2729 } 2730 2731 data->bps = bps; 2732 data->space_left += align_space - old_space; 2733 return 0; 2734 } 2735 2736 static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev, 2737 struct eeprom_table_record *bps, 2738 struct ras_err_data *err_data) 2739 { 2740 struct ta_ras_query_address_input addr_in; 2741 uint32_t socket = 0; 2742 int ret = 0; 2743 2744 if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) 2745 socket = adev->smuio.funcs->get_socket_id(adev); 2746 2747 /* reinit err_data */ 2748 err_data->err_addr_cnt = 0; 2749 err_data->err_addr_len = adev->umc.retire_unit; 2750 2751 memset(&addr_in, 0, sizeof(addr_in)); 2752 addr_in.ma.err_addr = bps->address; 2753 addr_in.ma.socket_id = socket; 2754 addr_in.ma.ch_inst = 
bps->mem_channel;
2755 	/* tell RAS TA the node instance is not used */
2756 	addr_in.ma.node_inst = TA_RAS_INV_NODE;
2757
2758 	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
2759 		ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
2760 				&addr_in, NULL, false);
2761
2762 	return ret;
2763 }
2764
2765 static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
2766 		struct eeprom_table_record *bps,
2767 		struct ras_err_data *err_data)
2768 {
2769 	struct ta_ras_query_address_input addr_in;
2770 	uint32_t die_id, socket = 0;
2771
2772 	if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
2773 		socket = adev->smuio.funcs->get_socket_id(adev);
2774
2775 	/* although the die id is obtained from the PA in nps1 mode, the id
2776 	 * is suitable for any nps mode
2777 	 */
2778 	if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
2779 		die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
2780 				bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
2781 	else
2782 		return -EINVAL;
2783
2784 	/* reinit err_data */
2785 	err_data->err_addr_cnt = 0;
2786 	err_data->err_addr_len = adev->umc.retire_unit;
2787
2788 	memset(&addr_in, 0, sizeof(addr_in));
2789 	addr_in.ma.err_addr = bps->address;
2790 	addr_in.ma.ch_inst = bps->mem_channel;
2791 	addr_in.ma.umc_inst = bps->mcumc_id;
2792 	addr_in.ma.node_inst = die_id;
2793 	addr_in.ma.socket_id = socket;
2794
2795 	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
2796 		return adev->umc.ras->convert_ras_err_addr(adev, err_data,
2797 				&addr_in, NULL, false);
2798 	else
2799 		return -EINVAL;
2800 }
2801
2802 /* it deals with vram only. */
2803 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2804 		struct eeprom_table_record *bps, int pages, bool from_rom)
2805 {
2806 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2807 	struct ras_err_handler_data *data;
2808 	struct ras_err_data err_data;
2809 	struct eeprom_table_record *err_rec;
2810 	struct amdgpu_ras_eeprom_control *control =
2811 		&adev->psp.ras_context.ras->eeprom_control;
2812 	enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
2813 	int ret = 0;
2814 	uint32_t i, j, loop_cnt = 1;
2815 	bool find_pages_per_pa = false;
2816
2817 	if (!con || !con->eh_data || !bps || pages <= 0)
2818 		return 0;
2819
2820 	if (from_rom) {
2821 		err_data.err_addr =
2822 			kcalloc(adev->umc.retire_unit,
2823 				sizeof(struct eeprom_table_record), GFP_KERNEL);
2824 		if (!err_data.err_addr) {
2825 			dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
2826 			ret = -ENOMEM;
2827 			goto out;
2828 		}
2829
2830 		err_rec = err_data.err_addr;
2831 		loop_cnt = adev->umc.retire_unit;
2832 		if (adev->gmc.gmc_funcs->query_mem_partition_mode)
2833 			nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
2834 	}
2835
2836 	mutex_lock(&con->recovery_lock);
2837 	data = con->eh_data;
2838 	if (!data) {
2839 		/* Returning 0 as the absence of eh_data is acceptable */
2840 		goto free;
2841 	}
2842
2843 	for (i = 0; i < pages; i++) {
2844 		if (from_rom &&
2845 		    control->rec_type == AMDGPU_RAS_EEPROM_REC_MCA) {
2846 			if (!find_pages_per_pa) {
2847 				if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
2848 					if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
2849 						/* may be using an old RAS TA; use the PA
2850 						 * to find the pages in one row
2851 						 */
2852 						if (amdgpu_umc_pages_in_a_row(adev, &err_data,
2853 								bps[i].retired_page <<
2854 								AMDGPU_GPU_PAGE_SHIFT)) {
2855 							ret = -EINVAL;
2856 							goto free;
2857 						} else {
2858 							find_pages_per_pa = true;
2859 						}
2860 					} else {
2861 						/* unsupported cases */
2862 						ret = -EOPNOTSUPP;
2863 						goto free;
2864 					}
2865 				}
2866 			} else {
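				/* find_pages_per_pa is already set: expand this
				 * record to all pages in its row directly via
				 * the retired physical address. */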
if (amdgpu_umc_pages_in_a_row(adev, &err_data,
2868 						bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
2869 					ret = -EINVAL;
2870 					goto free;
2871 				}
2872 			}
2873 		} else {
2874 			if (from_rom && !find_pages_per_pa) {
2875 				if (bps[i].retired_page & UMC_CHANNEL_IDX_V2) {
2876 					/* bad page in any NPS mode in eeprom */
2877 					if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
2878 						ret = -EINVAL;
2879 						goto free;
2880 					}
2881 				} else {
2882 					/* legacy bad page in eeprom, generated only in
2883 					 * NPS1 mode
2884 					 */
2885 					if (amdgpu_ras_mca2pa(adev, &bps[i], &err_data)) {
2886 						/* old RAS TA or ASICs which don't support
2887 						 * converting the address via the mca address
2888 						 */
2889 						if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
2890 							find_pages_per_pa = true;
2891 							err_rec = &bps[i];
2892 							loop_cnt = 1;
2893 						} else {
2894 							/* non-nps1 mode, an old RAS TA
2895 							 * can't support it
2896 							 */
2897 							ret = -EOPNOTSUPP;
2898 							goto free;
2899 						}
2900 					}
2901 				}
2902
2903 				if (!find_pages_per_pa)
2904 					i += (adev->umc.retire_unit - 1);
2905 			} else {
2906 				err_rec = &bps[i];
2907 			}
2908 		}
2909
2910 		for (j = 0; j < loop_cnt; j++) {
2911 			if (amdgpu_ras_check_bad_page_unlock(con,
2912 					err_rec[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2913 				continue;
2914
2915 			if (!data->space_left &&
2916 			    amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2917 				ret = -ENOMEM;
2918 				goto free;
2919 			}
2920
2921 			amdgpu_ras_reserve_page(adev, err_rec[j].retired_page);
2922
2923 			memcpy(&data->bps[data->count], &(err_rec[j]),
2924 			       sizeof(struct eeprom_table_record));
2925 			data->count++;
2926 			data->space_left--;
2927 		}
2928 	}
2929
2930 free:
2931 	if (from_rom)
2932 		kfree(err_data.err_addr);
2933 out:
2934 	mutex_unlock(&con->recovery_lock);
2935
2936 	return ret;
2937 }
2938
2939 /*
2940  * write the error record array to eeprom; the function should be
2941  * protected by recovery_lock
2942  * new_cnt: newly added UE count, excluding reserved bad pages; can be NULL
2943  */
2944 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2945 		unsigned long *new_cnt)
2946 {
2947 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2948 	struct ras_err_handler_data *data;
2949 	struct amdgpu_ras_eeprom_control *control;
2950 	int save_count, unit_num, bad_page_num, i;
2951
2952 	if (!con || !con->eh_data) {
2953 		if (new_cnt)
2954 			*new_cnt = 0;
2955
2956 		return 0;
2957 	}
2958
2959 	mutex_lock(&con->recovery_lock);
2960 	control = &con->eeprom_control;
2961 	data = con->eh_data;
2962 	bad_page_num = control->ras_num_bad_pages;
2963 	save_count = data->count - bad_page_num;
2964 	mutex_unlock(&con->recovery_lock);
2965
2966 	unit_num = save_count / adev->umc.retire_unit;
2967 	if (new_cnt)
2968 		*new_cnt = unit_num;
2969
2970 	/* only new entries are saved */
2971 	if (save_count > 0) {
2972 		if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA) {
2973 			if (amdgpu_ras_eeprom_append(control,
2974 					&data->bps[control->ras_num_recs],
2975 					save_count)) {
2976 				dev_err(adev->dev, "Failed to save EEPROM table data!");
2977 				return -EIO;
2978 			}
2979 		} else {
2980 			for (i = 0; i < unit_num; i++) {
2981 				if (amdgpu_ras_eeprom_append(control,
2982 						&data->bps[bad_page_num + i * adev->umc.retire_unit],
2983 						1)) {
2984 					dev_err(adev->dev, "Failed to save EEPROM table data!");
2985 					return -EIO;
2986 				}
2987 			}
2988 		}
2989
2990 		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2991 	}
2992
2993 	return 0;
2994 }
2995
2996 /*
2997  * read the error record array in eeprom and reserve enough space for
2998  * storing new bad pages
2999  */
3000 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
3001 {
3002 struct amdgpu_ras_eeprom_control *control = 3003 &adev->psp.ras_context.ras->eeprom_control; 3004 struct eeprom_table_record *bps; 3005 int ret; 3006 3007 /* no bad page record, skip eeprom access */ 3008 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0) 3009 return 0; 3010 3011 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL); 3012 if (!bps) 3013 return -ENOMEM; 3014 3015 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs); 3016 if (ret) { 3017 dev_err(adev->dev, "Failed to load EEPROM table records!"); 3018 } else { 3019 if (control->ras_num_recs > 1 && 3020 adev->umc.ras && adev->umc.ras->convert_ras_err_addr) { 3021 if ((bps[0].address == bps[1].address) && 3022 (bps[0].mem_channel == bps[1].mem_channel)) 3023 control->rec_type = AMDGPU_RAS_EEPROM_REC_PA; 3024 else 3025 control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA; 3026 } 3027 3028 ret = amdgpu_ras_eeprom_check(control); 3029 if (ret) 3030 goto out; 3031 3032 /* HW not usable */ 3033 if (amdgpu_ras_is_rma(adev)) { 3034 ret = -EHWPOISON; 3035 goto out; 3036 } 3037 3038 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true); 3039 } 3040 3041 out: 3042 kfree(bps); 3043 return ret; 3044 } 3045 3046 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, 3047 uint64_t addr) 3048 { 3049 struct ras_err_handler_data *data = con->eh_data; 3050 int i; 3051 3052 addr >>= AMDGPU_GPU_PAGE_SHIFT; 3053 for (i = 0; i < data->count; i++) 3054 if (addr == data->bps[i].retired_page) 3055 return true; 3056 3057 return false; 3058 } 3059 3060 /* 3061 * check if an address belongs to bad page 3062 * 3063 * Note: this check is only for umc block 3064 */ 3065 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, 3066 uint64_t addr) 3067 { 3068 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3069 bool ret = false; 3070 3071 if (!con || !con->eh_data) 3072 return ret; 3073 3074 mutex_lock(&con->recovery_lock); 3075 ret = amdgpu_ras_check_bad_page_unlock(con, addr); 3076 mutex_unlock(&con->recovery_lock); 3077 return ret; 3078 } 3079 3080 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev, 3081 uint32_t max_count) 3082 { 3083 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3084 3085 /* 3086 * amdgpu_bad_page_threshold is used to config 3087 * the threshold for the number of bad pages. 3088 * -1: Threshold is set to default value 3089 * Driver will issue a warning message when threshold is reached 3090 * and continue runtime services. 3091 * 0: Disable bad page retirement 3092 * Driver will not retire bad pages 3093 * which is intended for debugging purpose. 3094 * -2: Threshold is determined by a formula 3095 * that assumes 1 bad page per 100M of local memory. 3096 * Driver will continue runtime services when threhold is reached. 3097 * 0 < threshold < max number of bad page records in EEPROM, 3098 * A user-defined threshold is set 3099 * Driver will halt runtime services when this custom threshold is reached. 
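 * As a worked example (the VRAM size is assumed for illustration): with
 * the -2 setting on a 16 GiB board, the threshold works out to
 * 16 GiB / 100 MiB = 163 bad pages.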
3100 */ 3101 if (amdgpu_bad_page_threshold == -2) { 3102 u64 val = adev->gmc.mc_vram_size; 3103 3104 do_div(val, RAS_BAD_PAGE_COVER); 3105 con->bad_page_cnt_threshold = min(lower_32_bits(val), 3106 max_count); 3107 } else if (amdgpu_bad_page_threshold == -1) { 3108 con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4; 3109 } else { 3110 con->bad_page_cnt_threshold = min_t(int, max_count, 3111 amdgpu_bad_page_threshold); 3112 } 3113 } 3114 3115 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev, 3116 enum amdgpu_ras_block block, uint16_t pasid, 3117 pasid_notify pasid_fn, void *data, uint32_t reset) 3118 { 3119 int ret = 0; 3120 struct ras_poison_msg poison_msg; 3121 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3122 3123 memset(&poison_msg, 0, sizeof(poison_msg)); 3124 poison_msg.block = block; 3125 poison_msg.pasid = pasid; 3126 poison_msg.reset = reset; 3127 poison_msg.pasid_fn = pasid_fn; 3128 poison_msg.data = data; 3129 3130 ret = kfifo_put(&con->poison_fifo, poison_msg); 3131 if (!ret) { 3132 dev_err(adev->dev, "Poison message fifo is full!\n"); 3133 return -ENOSPC; 3134 } 3135 3136 return 0; 3137 } 3138 3139 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev, 3140 struct ras_poison_msg *poison_msg) 3141 { 3142 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3143 3144 return kfifo_get(&con->poison_fifo, poison_msg); 3145 } 3146 3147 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log) 3148 { 3149 mutex_init(&ecc_log->lock); 3150 3151 INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL); 3152 ecc_log->de_queried_count = 0; 3153 ecc_log->prev_de_queried_count = 0; 3154 } 3155 3156 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log) 3157 { 3158 struct radix_tree_iter iter; 3159 void __rcu **slot; 3160 struct ras_ecc_err *ecc_err; 3161 3162 mutex_lock(&ecc_log->lock); 3163 radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) { 3164 ecc_err = radix_tree_deref_slot(slot); 3165 kfree(ecc_err->err_pages.pfn); 3166 kfree(ecc_err); 3167 radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot); 3168 } 3169 mutex_unlock(&ecc_log->lock); 3170 3171 mutex_destroy(&ecc_log->lock); 3172 ecc_log->de_queried_count = 0; 3173 ecc_log->prev_de_queried_count = 0; 3174 } 3175 3176 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con, 3177 uint32_t delayed_ms) 3178 { 3179 int ret; 3180 3181 mutex_lock(&con->umc_ecc_log.lock); 3182 ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree, 3183 UMC_ECC_NEW_DETECTED_TAG); 3184 mutex_unlock(&con->umc_ecc_log.lock); 3185 3186 if (ret) 3187 schedule_delayed_work(&con->page_retirement_dwork, 3188 msecs_to_jiffies(delayed_ms)); 3189 3190 return ret ? 
true : false; 3191 } 3192 3193 static void amdgpu_ras_do_page_retirement(struct work_struct *work) 3194 { 3195 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, 3196 page_retirement_dwork.work); 3197 struct amdgpu_device *adev = con->adev; 3198 struct ras_err_data err_data; 3199 unsigned long err_cnt; 3200 3201 /* If gpu reset is ongoing, delay retiring the bad pages */ 3202 if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) { 3203 amdgpu_ras_schedule_retirement_dwork(con, 3204 AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3); 3205 return; 3206 } 3207 3208 amdgpu_ras_error_data_init(&err_data); 3209 3210 amdgpu_umc_handle_bad_pages(adev, &err_data); 3211 err_cnt = err_data.err_addr_cnt; 3212 3213 amdgpu_ras_error_data_fini(&err_data); 3214 3215 if (err_cnt && amdgpu_ras_is_rma(adev)) 3216 amdgpu_ras_reset_gpu(adev); 3217 3218 amdgpu_ras_schedule_retirement_dwork(con, 3219 AMDGPU_RAS_RETIRE_PAGE_INTERVAL); 3220 } 3221 3222 static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev, 3223 uint32_t poison_creation_count) 3224 { 3225 int ret = 0; 3226 struct ras_ecc_log_info *ecc_log; 3227 struct ras_query_if info; 3228 uint32_t timeout = 0; 3229 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 3230 uint64_t de_queried_count; 3231 uint32_t new_detect_count, total_detect_count; 3232 uint32_t need_query_count = poison_creation_count; 3233 bool query_data_timeout = false; 3234 enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION; 3235 3236 memset(&info, 0, sizeof(info)); 3237 info.head.block = AMDGPU_RAS_BLOCK__UMC; 3238 3239 ecc_log = &ras->umc_ecc_log; 3240 total_detect_count = 0; 3241 do { 3242 ret = amdgpu_ras_query_error_status_with_event(adev, &info, type); 3243 if (ret) 3244 return ret; 3245 3246 de_queried_count = ecc_log->de_queried_count; 3247 if (de_queried_count > ecc_log->prev_de_queried_count) { 3248 new_detect_count = de_queried_count - ecc_log->prev_de_queried_count; 3249 ecc_log->prev_de_queried_count = de_queried_count; 3250 timeout = 0; 3251 } else { 3252 new_detect_count = 0; 3253 } 3254 3255 if (new_detect_count) { 3256 total_detect_count += new_detect_count; 3257 } else { 3258 if (!timeout && need_query_count) 3259 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC; 3260 3261 if (timeout) { 3262 if (!--timeout) { 3263 query_data_timeout = true; 3264 break; 3265 } 3266 msleep(1); 3267 } 3268 } 3269 } while (total_detect_count < need_query_count); 3270 3271 if (query_data_timeout) { 3272 dev_warn(adev->dev, "Can't find deferred error! 
count: %u\n", 3273 (need_query_count - total_detect_count)); 3274 return -ENOENT; 3275 } 3276 3277 if (total_detect_count) 3278 schedule_delayed_work(&ras->page_retirement_dwork, 0); 3279 3280 return 0; 3281 } 3282 3283 static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev) 3284 { 3285 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3286 struct ras_poison_msg msg; 3287 int ret; 3288 3289 do { 3290 ret = kfifo_get(&con->poison_fifo, &msg); 3291 } while (ret); 3292 } 3293 3294 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev, 3295 uint32_t msg_count, uint32_t *gpu_reset) 3296 { 3297 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3298 uint32_t reset_flags = 0, reset = 0; 3299 struct ras_poison_msg msg; 3300 int ret, i; 3301 3302 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); 3303 3304 for (i = 0; i < msg_count; i++) { 3305 ret = amdgpu_ras_get_poison_req(adev, &msg); 3306 if (!ret) 3307 continue; 3308 3309 if (msg.pasid_fn) 3310 msg.pasid_fn(adev, msg.pasid, msg.data); 3311 3312 reset_flags |= msg.reset; 3313 } 3314 3315 /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */ 3316 if (reset_flags && !amdgpu_ras_is_rma(adev)) { 3317 if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) 3318 reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; 3319 else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) 3320 reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; 3321 else 3322 reset = reset_flags; 3323 3324 flush_delayed_work(&con->page_retirement_dwork); 3325 3326 con->gpu_reset_flags |= reset; 3327 amdgpu_ras_reset_gpu(adev); 3328 3329 *gpu_reset = reset; 3330 3331 /* Wait for gpu recovery to complete */ 3332 flush_work(&con->recovery_work); 3333 } 3334 3335 return 0; 3336 } 3337 3338 static int amdgpu_ras_page_retirement_thread(void *param) 3339 { 3340 struct amdgpu_device *adev = (struct amdgpu_device *)param; 3341 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3342 uint32_t poison_creation_count, msg_count; 3343 uint32_t gpu_reset; 3344 int ret; 3345 3346 while (!kthread_should_stop()) { 3347 3348 wait_event_interruptible(con->page_retirement_wq, 3349 kthread_should_stop() || 3350 atomic_read(&con->page_retirement_req_cnt)); 3351 3352 if (kthread_should_stop()) 3353 break; 3354 3355 gpu_reset = 0; 3356 3357 do { 3358 poison_creation_count = atomic_read(&con->poison_creation_count); 3359 ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count); 3360 if (ret == -EIO) 3361 break; 3362 3363 if (poison_creation_count) { 3364 atomic_sub(poison_creation_count, &con->poison_creation_count); 3365 atomic_sub(poison_creation_count, &con->page_retirement_req_cnt); 3366 } 3367 } while (atomic_read(&con->poison_creation_count)); 3368 3369 if (ret != -EIO) { 3370 msg_count = kfifo_len(&con->poison_fifo); 3371 if (msg_count) { 3372 ret = amdgpu_ras_poison_consumption_handler(adev, 3373 msg_count, &gpu_reset); 3374 if ((ret != -EIO) && 3375 (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET)) 3376 atomic_sub(msg_count, &con->page_retirement_req_cnt); 3377 } 3378 } 3379 3380 if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) { 3381 /* gpu mode-1 reset is ongoing or just completed ras mode-1 reset */ 3382 /* Clear poison creation request */ 3383 atomic_set(&con->poison_creation_count, 0); 3384 3385 /* Clear poison fifo */ 3386 amdgpu_ras_clear_poison_fifo(adev); 3387 3388 /* Clear all poison requests */ 3389 atomic_set(&con->page_retirement_req_cnt, 0); 3390 3391 if (ret == -EIO) { 3392 /* Wait for mode-1 reset to complete */ 3393 
down_read(&adev->reset_domain->sem); 3394 up_read(&adev->reset_domain->sem); 3395 } 3396 3397 /* Wake up work to save bad pages to eeprom */ 3398 schedule_delayed_work(&con->page_retirement_dwork, 0); 3399 } else if (gpu_reset) { 3400 /* gpu just completed mode-2 reset or other reset */ 3401 /* Clear poison consumption messages cached in fifo */ 3402 msg_count = kfifo_len(&con->poison_fifo); 3403 if (msg_count) { 3404 amdgpu_ras_clear_poison_fifo(adev); 3405 atomic_sub(msg_count, &con->page_retirement_req_cnt); 3406 } 3407 3408 /* Wake up work to save bad pages to eeprom */ 3409 schedule_delayed_work(&con->page_retirement_dwork, 0); 3410 } 3411 } 3412 3413 return 0; 3414 } 3415 3416 int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) 3417 { 3418 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3419 struct amdgpu_ras_eeprom_control *control; 3420 int ret; 3421 3422 if (!con || amdgpu_sriov_vf(adev)) 3423 return 0; 3424 3425 control = &con->eeprom_control; 3426 ret = amdgpu_ras_eeprom_init(control); 3427 if (ret) 3428 return ret; 3429 3430 if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr) 3431 control->rec_type = AMDGPU_RAS_EEPROM_REC_PA; 3432 3433 /* default status is MCA storage */ 3434 if (control->ras_num_recs <= 1 && 3435 adev->umc.ras && adev->umc.ras->convert_ras_err_addr) 3436 control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA; 3437 3438 if (control->ras_num_recs) { 3439 ret = amdgpu_ras_load_bad_pages(adev); 3440 if (ret) 3441 return ret; 3442 3443 amdgpu_dpm_send_hbm_bad_pages_num( 3444 adev, control->ras_num_bad_pages); 3445 3446 if (con->update_channel_flag == true) { 3447 amdgpu_dpm_send_hbm_bad_channel_flag( 3448 adev, control->bad_channel_bitmap); 3449 con->update_channel_flag = false; 3450 } 3451 } 3452 3453 return ret; 3454 } 3455 3456 int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info) 3457 { 3458 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3459 struct ras_err_handler_data **data; 3460 u32 max_eeprom_records_count = 0; 3461 int ret; 3462 3463 if (!con || amdgpu_sriov_vf(adev)) 3464 return 0; 3465 3466 /* Allow access to RAS EEPROM via debugfs, when the ASIC 3467 * supports RAS and debugfs is enabled, but when 3468 * adev->ras_enabled is unset, i.e. when "ras_enable" 3469 * module parameter is set to 0. 
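	 * That is why con->adev is assigned below before the early return
	 * for the !adev->ras_enabled case.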
3470 */ 3471 con->adev = adev; 3472 3473 if (!adev->ras_enabled) 3474 return 0; 3475 3476 data = &con->eh_data; 3477 *data = kzalloc(sizeof(**data), GFP_KERNEL); 3478 if (!*data) { 3479 ret = -ENOMEM; 3480 goto out; 3481 } 3482 3483 mutex_init(&con->recovery_lock); 3484 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); 3485 atomic_set(&con->in_recovery, 0); 3486 con->eeprom_control.bad_channel_bitmap = 0; 3487 3488 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); 3489 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count); 3490 3491 if (init_bp_info) { 3492 ret = amdgpu_ras_init_badpage_info(adev); 3493 if (ret) 3494 goto free; 3495 } 3496 3497 mutex_init(&con->page_rsv_lock); 3498 INIT_KFIFO(con->poison_fifo); 3499 mutex_init(&con->page_retirement_lock); 3500 init_waitqueue_head(&con->page_retirement_wq); 3501 atomic_set(&con->page_retirement_req_cnt, 0); 3502 atomic_set(&con->poison_creation_count, 0); 3503 con->page_retirement_thread = 3504 kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement"); 3505 if (IS_ERR(con->page_retirement_thread)) { 3506 con->page_retirement_thread = NULL; 3507 dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n"); 3508 } 3509 3510 INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement); 3511 amdgpu_ras_ecc_log_init(&con->umc_ecc_log); 3512 #ifdef CONFIG_X86_MCE_AMD 3513 if ((adev->asic_type == CHIP_ALDEBARAN) && 3514 (adev->gmc.xgmi.connected_to_cpu)) 3515 amdgpu_register_bad_pages_mca_notifier(adev); 3516 #endif 3517 return 0; 3518 3519 free: 3520 kfree((*data)->bps); 3521 kfree(*data); 3522 con->eh_data = NULL; 3523 out: 3524 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret); 3525 3526 /* 3527 * Except error threshold exceeding case, other failure cases in this 3528 * function would not fail amdgpu driver init. 
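	 * (only the RMA case below propagates an error to the caller)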
3529 	 */
3530 	if (!amdgpu_ras_is_rma(adev))
3531 		ret = 0;
3532 	else
3533 		ret = -EINVAL;
3534
3535 	return ret;
3536 }
3537
3538 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3539 {
3540 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3541 	struct ras_err_handler_data *data = con->eh_data;
3542 	int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
3543 	bool ret;
3544
3545 	/* recovery_init failed to init it, so fini is useless */
3546 	if (!data)
3547 		return 0;
3548
3549 	/* Save all cached bad pages to eeprom */
3550 	do {
3551 		flush_delayed_work(&con->page_retirement_dwork);
3552 		ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
3553 	} while (ret && max_flush_timeout--);
3554
3555 	if (con->page_retirement_thread)
3556 		kthread_stop(con->page_retirement_thread);
3557
3558 	atomic_set(&con->page_retirement_req_cnt, 0);
3559 	atomic_set(&con->poison_creation_count, 0);
3560
3561 	mutex_destroy(&con->page_rsv_lock);
3562
3563 	cancel_work_sync(&con->recovery_work);
3564
3565 	cancel_delayed_work_sync(&con->page_retirement_dwork);
3566
3567 	amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3568
3569 	mutex_lock(&con->recovery_lock);
3570 	con->eh_data = NULL;
3571 	kfree(data->bps);
3572 	kfree(data);
3573 	mutex_unlock(&con->recovery_lock);
3574
3575 	return 0;
3576 }
3577 /* recovery end */
3578
3579 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
3580 {
3581 	if (amdgpu_sriov_vf(adev)) {
3582 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3583 		case IP_VERSION(13, 0, 2):
3584 		case IP_VERSION(13, 0, 6):
3585 		case IP_VERSION(13, 0, 12):
3586 		case IP_VERSION(13, 0, 14):
3587 			return true;
3588 		default:
3589 			return false;
3590 		}
3591 	}
3592
3593 	if (adev->asic_type == CHIP_IP_DISCOVERY) {
3594 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3595 		case IP_VERSION(13, 0, 0):
3596 		case IP_VERSION(13, 0, 6):
3597 		case IP_VERSION(13, 0, 10):
3598 		case IP_VERSION(13, 0, 12):
3599 		case IP_VERSION(13, 0, 14):
3600 		case IP_VERSION(14, 0, 3):
3601 			return true;
3602 		default:
3603 			return false;
3604 		}
3605 	}
3606
3607 	return adev->asic_type == CHIP_VEGA10 ||
3608 		adev->asic_type == CHIP_VEGA20 ||
3609 		adev->asic_type == CHIP_ARCTURUS ||
3610 		adev->asic_type == CHIP_ALDEBARAN ||
3611 		adev->asic_type == CHIP_SIENNA_CICHLID;
3612 }
3613
3614 /*
3615  * this is a workaround for the vega20 workstation sku:
3616  * force enable gfx ras and ignore the vbios gfx ras flag,
3617  * because GC EDC cannot be written
3618  */
3619 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
3620 {
3621 	struct atom_context *ctx = adev->mode_info.atom_context;
3622
3623 	if (!ctx)
3624 		return;
3625
3626 	if (strnstr(ctx->vbios_pn, "D16406",
3627 		    sizeof(ctx->vbios_pn)) ||
3628 	    strnstr(ctx->vbios_pn, "D36002",
3629 		    sizeof(ctx->vbios_pn)))
3630 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
3631 }
3632
3633 /* Query ras capability via the atomfirmware interface */
3634 static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
3635 {
3636 	/* mem_ecc cap */
3637 	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
3638 		dev_info(adev->dev, "MEM ECC is active.\n");
3639 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
3640 					 1 << AMDGPU_RAS_BLOCK__DF);
3641 	} else {
3642 		dev_info(adev->dev, "MEM ECC is not presented.\n");
3643 	}
3644
3645 	/* sram_ecc cap */
3646 	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
3647 		dev_info(adev->dev, "SRAM ECC is active.\n");
3648 		if (!amdgpu_sriov_vf(adev))
3649 			adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
3650 						  1 << AMDGPU_RAS_BLOCK__DF);
3651 		else
3652
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
3653 						 1 << AMDGPU_RAS_BLOCK__SDMA |
3654 						 1 << AMDGPU_RAS_BLOCK__GFX);
3655
3656 		/*
3657 		 * VCN/JPEG RAS can be supported in both bare-metal and
3658 		 * SRIOV environments
3659 		 */
3660 		if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
3661 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
3662 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
3663 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
3664 						 1 << AMDGPU_RAS_BLOCK__JPEG);
3665 		else
3666 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
3667 						  1 << AMDGPU_RAS_BLOCK__JPEG);
3668
3669 		/*
3670 		 * XGMI RAS is not supported if the number of xgmi physical
3671 		 * nodes is zero
3672 		 */
3673 		if (!adev->gmc.xgmi.num_physical_nodes)
3674 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
3675 	} else {
3676 		dev_info(adev->dev, "SRAM ECC is not presented.\n");
3677 	}
3678 }
3679
3680 /* Query the poison mode from umc/df IP callbacks */
3681 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
3682 {
3683 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3684 	bool df_poison, umc_poison;
3685
3686 	/* the poison setting is useless on an SRIOV guest */
3687 	if (amdgpu_sriov_vf(adev) || !con)
3688 		return;
3689
3690 	/* Init the poison supported flag, the default value is false */
3691 	if (adev->gmc.xgmi.connected_to_cpu ||
3692 	    adev->gmc.is_app_apu) {
3693 		/* enabled by default when the GPU is connected to the CPU */
3694 		con->poison_supported = true;
3695 	} else if (adev->df.funcs &&
3696 		   adev->df.funcs->query_ras_poison_mode &&
3697 		   adev->umc.ras &&
3698 		   adev->umc.ras->query_ras_poison_mode) {
3699 		df_poison =
3700 			adev->df.funcs->query_ras_poison_mode(adev);
3701 		umc_poison =
3702 			adev->umc.ras->query_ras_poison_mode(adev);
3703
3704 		/* Only if poison is set in both DF and UMC can we support it */
3705 		if (df_poison && umc_poison)
3706 			con->poison_supported = true;
3707 		else if (df_poison != umc_poison)
3708 			dev_warn(adev->dev,
3709 				 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
3710 				 df_poison, umc_poison);
3711 	}
3712 }
3713
3714 /*
3715  * Check the hardware's ras ability, which will be saved in hw_supported.
3716  * If the hardware does not support ras, we can skip some ras initialization
3717  * and forbid some ras operations from IPs.
3718  * If software itself (say, a boot parameter) limits the ras ability, we
3719  * still need to allow IPs to do some limited operations, like disable.
3720  * In such a case we have to initialize ras as normal, but need to check
3721  * whether an operation is allowed in each function.
3722  */
3723 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
3724 {
3725 	adev->ras_hw_enabled = adev->ras_enabled = 0;
3726
3727 	if (!amdgpu_ras_asic_supported(adev))
3728 		return;
3729
3730 	if (amdgpu_sriov_vf(adev)) {
3731 		if (amdgpu_virt_get_ras_capability(adev))
3732 			goto init_ras_enabled_flag;
3733 	}
3734
3735 	/* query ras capability from psp */
3736 	if (amdgpu_psp_get_ras_capability(&adev->psp))
3737 		goto init_ras_enabled_flag;
3738
3739 	/* query ras capability from bios */
3740 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
3741 		amdgpu_ras_query_ras_capablity_from_vbios(adev);
3742 	} else {
3743 		/* the driver only manages a few IP blocks' RAS features
3744 		 * when the GPU is connected to the cpu through XGMI */
3745 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
3746 					 1 << AMDGPU_RAS_BLOCK__SDMA |
3747 					 1 << AMDGPU_RAS_BLOCK__MMHUB);
3748 	}
3749
3750 	/* apply asic specific settings (vega20 only for now) */
3751 	amdgpu_ras_get_quirks(adev);
3752
3753 	/* query the poison mode from the umc/df ip callback */
3754 	amdgpu_ras_query_poison_mode(adev);
3755
3756 init_ras_enabled_flag:
3757 	/* hw_supported needs to be aligned with the RAS block mask. */
3758 	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
3759
3760 	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3761 		adev->ras_hw_enabled & amdgpu_ras_mask;
3762
3763 	/* aca is disabled by default */
3764 	adev->aca.is_enabled = false;
3765
3766 	/* the bad page feature is not applicable to the specific app platform */
3767 	if (adev->gmc.is_app_apu &&
3768 	    amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
3769 		amdgpu_bad_page_threshold = 0;
3770 }
3771
3772 static void amdgpu_ras_counte_dw(struct work_struct *work)
3773 {
3774 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3775 					      ras_counte_delay_work.work);
3776 	struct amdgpu_device *adev = con->adev;
3777 	struct drm_device *dev = adev_to_drm(adev);
3778 	unsigned long ce_count, ue_count;
3779 	int res;
3780
3781 	res = pm_runtime_get_sync(dev->dev);
3782 	if (res < 0)
3783 		goto Out;
3784
3785 	/* Cache new values.
3786 	 */
3787 	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
3788 		atomic_set(&con->ras_ce_count, ce_count);
3789 		atomic_set(&con->ras_ue_count, ue_count);
3790 	}
3791
3792 	pm_runtime_mark_last_busy(dev->dev);
3793 Out:
3794 	pm_runtime_put_autosuspend(dev->dev);
3795 }
3796
3797 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
3798 {
3799 	/* parenthesized so that the poison bit is OR'ed with the other error
	 * types instead of replacing them (?: binds more loosely than |) */
	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
3800 			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
3801 			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
3802 			AMDGPU_RAS_ERROR__PARITY;
3803 }
3804
3805 static void ras_event_mgr_init(struct ras_event_manager *mgr)
3806 {
3807 	struct ras_event_state *event_state;
3808 	int i;
3809
3810 	memset(mgr, 0, sizeof(*mgr));
3811 	atomic64_set(&mgr->seqno, 0);
3812
3813 	for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
3814 		event_state = &mgr->event_state[i];
3815 		event_state->last_seqno = RAS_EVENT_INVALID_ID;
3816 		atomic64_set(&event_state->count, 0);
3817 	}
3818 }
3819
3820 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
3821 {
3822 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3823 	struct amdgpu_hive_info *hive;
3824
3825 	if (!ras)
3826 		return;
3827
3828 	hive = amdgpu_get_xgmi_hive(adev);
3829 	ras->event_mgr = hive ?
static void amdgpu_ras_counte_dw(struct work_struct *work)
{
	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
					      ras_counte_delay_work.work);
	struct amdgpu_device *adev = con->adev;
	struct drm_device *dev = adev_to_drm(adev);
	unsigned long ce_count, ue_count;
	int res;

	res = pm_runtime_get_sync(dev->dev);
	if (res < 0)
		goto Out;

	/* Cache new values.
	 */
	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
		atomic_set(&con->ras_ce_count, ce_count);
		atomic_set(&con->ras_ue_count, ue_count);
	}

	pm_runtime_mark_last_busy(dev->dev);
Out:
	pm_runtime_put_autosuspend(dev->dev);
}

static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
{
	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
			AMDGPU_RAS_ERROR__PARITY;
}
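/*
 * Illustrative check (hypothetical, not used by the driver): with the
 * parenthesized expression above, a poison-capable device reports the union
 * of all four error classes in its schema; otherwise only the poison bit is
 * dropped.
 */
static bool __maybe_unused ras_example_schema_has_poison(struct amdgpu_device *adev)
{
	return !!(amdgpu_get_ras_schema(adev) & AMDGPU_RAS_ERROR__POISON);
}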
static void ras_event_mgr_init(struct ras_event_manager *mgr)
{
	struct ras_event_state *event_state;
	int i;

	memset(mgr, 0, sizeof(*mgr));
	atomic64_set(&mgr->seqno, 0);

	for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
		event_state = &mgr->event_state[i];
		event_state->last_seqno = RAS_EVENT_INVALID_ID;
		atomic64_set(&event_state->count, 0);
	}
}

static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	struct amdgpu_hive_info *hive;

	if (!ras)
		return;

	hive = amdgpu_get_xgmi_hive(adev);
	ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;

	/* init the event manager with node 0 on an xgmi system */
	if (!amdgpu_reset_in_recovery(adev)) {
		if (!hive || adev->gmc.xgmi.node_id == 0)
			ras_event_mgr_init(ras->event_mgr);
	}

	if (hive)
		amdgpu_put_xgmi_hive(hive);
}
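/*
 * Sketch (illustrative): because the manager may be shared across an XGMI
 * hive, sequence numbers must come from the single atomic counter in the
 * selected manager; amdgpu_ras_mark_ras_event_caller() later in this file
 * does exactly this. A hypothetical direct draw would look like:
 */
static u64 __maybe_unused ras_example_next_seqno(struct ras_event_manager *mgr)
{
	/* atomically reserve the next hive-wide event sequence number */
	return atomic64_inc_return(&mgr->seqno);
}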
static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con || (adev->flags & AMD_IS_APU))
		return;

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 12):
		con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT;
		break;
	case IP_VERSION(13, 0, 14):
		con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1);
		break;
	default:
		break;
	}
}

int amdgpu_ras_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int r;

	if (con)
		return 0;

	con = kzalloc(sizeof(*con) +
			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
			GFP_KERNEL);
	if (!con)
		return -ENOMEM;

	con->adev = adev;
	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
	atomic_set(&con->ras_ce_count, 0);
	atomic_set(&con->ras_ue_count, 0);

	con->objs = (struct ras_manager *)(con + 1);

	amdgpu_ras_set_context(adev, con);

	amdgpu_ras_check_supported(adev);

	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
		/* set the gfx block ras context feature for VEGA20 Gaming;
		 * send a ras disable cmd to the ras ta during ras late init.
		 */
		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);

			return 0;
		}

		r = 0;
		goto release_con;
	}

	con->update_channel_flag = false;
	con->features = 0;
	con->schema = 0;
	INIT_LIST_HEAD(&con->head);
	/* We might need to get this flag from the vbios. */
	con->flags = RAS_DEFAULT_FLAGS;

	/* initialize the nbio ras function ahead of any other
	 * ras functions so the hardware fatal error interrupt
	 * can be enabled as early as possible */
	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->nbio.ras = &nbio_v7_4_ras;
		break;
	case IP_VERSION(4, 3, 0):
		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
			/* unlike other generations of nbio ras,
			 * nbio v4_3 only supports the fatal error interrupt,
			 * to inform software that DF is frozen due to
			 * a system fatal error event. The driver should not
			 * enable nbio ras in such a case. Instead,
			 * check DF RAS */
			adev->nbio.ras = &nbio_v4_3_ras;
		break;
	case IP_VERSION(6, 3, 1):
		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
			/* unlike other generations of nbio ras,
			 * nbif v6_3_1 only supports the fatal error interrupt,
			 * to inform software that DF is frozen due to
			 * a system fatal error event. The driver should not
			 * enable nbio ras in such a case. Instead,
			 * check DF RAS
			 */
			adev->nbio.ras = &nbif_v6_3_1_ras;
		break;
	case IP_VERSION(7, 9, 0):
	case IP_VERSION(7, 9, 1):
		if (!adev->gmc.is_app_apu)
			adev->nbio.ras = &nbio_v7_9_ras;
		break;
	default:
		/* nbio ras is not available */
		break;
	}

	/* the nbio ras block needs to be enabled ahead of other ras blocks
	 * to handle fatal errors */
	r = amdgpu_nbio_ras_sw_init(adev);
	if (r)
		goto release_con;

	if (adev->nbio.ras &&
	    adev->nbio.ras->init_ras_controller_interrupt) {
		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
		if (r)
			goto release_con;
	}

	if (adev->nbio.ras &&
	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
		if (r)
			goto release_con;
	}

	/* Pack the socket_id into ras feature mask bits [31:29] */
	if (adev->smuio.funcs &&
	    adev->smuio.funcs->get_socket_id)
		con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
					AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);

	/* Get the RAS schema for the particular SOC */
	con->schema = amdgpu_get_ras_schema(adev);

	amdgpu_ras_init_reserved_vram_size(adev);

	if (amdgpu_ras_fs_init(adev)) {
		r = -EINVAL;
		goto release_con;
	}

	if (amdgpu_ras_aca_is_supported(adev)) {
		if (amdgpu_aca_is_enabled(adev))
			r = amdgpu_aca_init(adev);
		else
			r = amdgpu_mca_init(adev);
		if (r)
			goto release_con;
	}

	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
		 "hardware ability[%x] ras_mask[%x]\n",
		 adev->ras_hw_enabled, adev->ras_enabled);

	return 0;
release_con:
	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return r;
}
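/*
 * Worked example (illustrative): amdgpu_ras_init() packs the socket id into
 * feature-mask bits [31:29]. A hypothetical helper to read it back:
 */
static inline u32 __maybe_unused ras_example_features_socket_id(u32 features)
{
	/* e.g. socket 3 packed above as (3 << 29) reads back as 3 */
	return features >> AMDGPU_RAS_FEATURES_SOCKETID_SHIFT;
}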
int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
{
	if (adev->gmc.xgmi.connected_to_cpu ||
	    adev->gmc.is_app_apu)
		return 1;
	return 0;
}

static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
					    struct ras_common_if *ras_block)
{
	struct ras_query_if info = {
		.head = *ras_block,
	};

	if (!amdgpu_persistent_edc_harvesting_supported(adev))
		return 0;

	if (amdgpu_ras_query_error_status(adev, &info) != 0)
		DRM_WARN("RAS init harvest failure");

	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
		DRM_WARN("RAS init harvest reset failure");

	return 0;
}

bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return false;

	return con->poison_supported;
}

/* helper function to handle common stuff in the ip late init phase */
int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
			       struct ras_common_if *ras_block)
{
	struct amdgpu_ras_block_object *ras_obj = NULL;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_query_if *query_info;
	unsigned long ue_count, ce_count;
	int r;

	/* disable RAS feature per IP block if it is not supported */
	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
		return 0;
	}

	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
	if (r) {
		if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
			/* in the resume phase, if we fail to enable ras,
			 * clean up all ras fs nodes and disable ras */
			goto cleanup;
		} else
			return r;
	}

	/* check for errors on warm reset for edc-persistent supported ASICs */
	amdgpu_persistent_edc_harvesting(adev, ras_block);

	/* in the resume phase, there is no need to create ras fs nodes */
	if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
		return 0;

	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
	    (ras_obj->hw_ops->query_poison_status ||
	     ras_obj->hw_ops->handle_poison_consumption))) {
		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
		if (r)
			goto cleanup;
	}

	if (ras_obj->hw_ops &&
	    (ras_obj->hw_ops->query_ras_error_count ||
	     ras_obj->hw_ops->query_ras_error_status)) {
		r = amdgpu_ras_sysfs_create(adev, ras_block);
		if (r)
			goto interrupt;

		/* Those are the cached values at init.
		 */
		query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
		if (!query_info)
			return -ENOMEM;
		memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));

		if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
			atomic_set(&con->ras_ce_count, ce_count);
			atomic_set(&con->ras_ue_count, ue_count);
		}

		kfree(query_info);
	}

	return 0;

interrupt:
	if (ras_obj->ras_cb)
		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
cleanup:
	amdgpu_ras_feature_enable(adev, ras_block, 0);
	return r;
}
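/*
 * Illustrative sketch of how an IP block is expected to wire its RAS object
 * so that the common late-init path above picks it up. All values here are
 * hypothetical; real blocks also populate .hw_ops and interrupt callbacks.
 */
static struct amdgpu_ras_block_object ras_example_block_obj __maybe_unused = {
	.ras_comm = {
		.name = "example",
		.block = AMDGPU_RAS_BLOCK__SDMA,
		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
	},
	/* .ras_late_init left NULL: the default helper below is used instead */
};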
ret:%d\n", 4239 obj->ras_comm.name, r); 4240 return r; 4241 } 4242 } else 4243 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm); 4244 } 4245 4246 return 0; 4247 } 4248 4249 /* do some fini work before IP fini as dependence */ 4250 int amdgpu_ras_pre_fini(struct amdgpu_device *adev) 4251 { 4252 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4253 4254 if (!adev->ras_enabled || !con) 4255 return 0; 4256 4257 4258 /* Need disable ras on all IPs here before ip [hw/sw]fini */ 4259 if (AMDGPU_RAS_GET_FEATURES(con->features)) 4260 amdgpu_ras_disable_all_features(adev, 0); 4261 amdgpu_ras_recovery_fini(adev); 4262 return 0; 4263 } 4264 4265 int amdgpu_ras_fini(struct amdgpu_device *adev) 4266 { 4267 struct amdgpu_ras_block_list *ras_node, *tmp; 4268 struct amdgpu_ras_block_object *obj = NULL; 4269 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4270 4271 if (!adev->ras_enabled || !con) 4272 return 0; 4273 4274 list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) { 4275 if (ras_node->ras_obj) { 4276 obj = ras_node->ras_obj; 4277 if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) && 4278 obj->ras_fini) 4279 obj->ras_fini(adev, &obj->ras_comm); 4280 else 4281 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm); 4282 } 4283 4284 /* Clear ras blocks from ras_list and free ras block list node */ 4285 list_del(&ras_node->node); 4286 kfree(ras_node); 4287 } 4288 4289 amdgpu_ras_fs_fini(adev); 4290 amdgpu_ras_interrupt_remove_all(adev); 4291 4292 if (amdgpu_ras_aca_is_supported(adev)) { 4293 if (amdgpu_aca_is_enabled(adev)) 4294 amdgpu_aca_fini(adev); 4295 else 4296 amdgpu_mca_fini(adev); 4297 } 4298 4299 WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared"); 4300 4301 if (AMDGPU_RAS_GET_FEATURES(con->features)) 4302 amdgpu_ras_disable_all_features(adev, 0); 4303 4304 cancel_delayed_work_sync(&con->ras_counte_delay_work); 4305 4306 amdgpu_ras_set_context(adev, NULL); 4307 kfree(con); 4308 4309 return 0; 4310 } 4311 4312 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev) 4313 { 4314 struct amdgpu_ras *ras; 4315 4316 ras = amdgpu_ras_get_context(adev); 4317 if (!ras) 4318 return false; 4319 4320 return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state); 4321 } 4322 4323 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status) 4324 { 4325 struct amdgpu_ras *ras; 4326 4327 ras = amdgpu_ras_get_context(adev); 4328 if (ras) { 4329 if (status) 4330 set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state); 4331 else 4332 clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state); 4333 } 4334 } 4335 4336 void amdgpu_ras_clear_err_state(struct amdgpu_device *adev) 4337 { 4338 struct amdgpu_ras *ras; 4339 4340 ras = amdgpu_ras_get_context(adev); 4341 if (ras) 4342 ras->ras_err_state = 0; 4343 } 4344 4345 void amdgpu_ras_set_err_poison(struct amdgpu_device *adev, 4346 enum amdgpu_ras_block block) 4347 { 4348 struct amdgpu_ras *ras; 4349 4350 ras = amdgpu_ras_get_context(adev); 4351 if (ras) 4352 set_bit(block, &ras->ras_err_state); 4353 } 4354 4355 bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block) 4356 { 4357 struct amdgpu_ras *ras; 4358 4359 ras = amdgpu_ras_get_context(adev); 4360 if (ras) { 4361 if (block == AMDGPU_RAS_BLOCK__ANY) 4362 return (ras->ras_err_state != 0); 4363 else 4364 return test_bit(block, &ras->ras_err_state) || 4365 test_bit(AMDGPU_RAS_BLOCK__LAST, 4366 &ras->ras_err_state); 4367 } 4368 4369 return false; 4370 } 4371 4372 static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev) 4373 
/* do some fini work before IP fini, as a dependency */
int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return 0;

	/* Need to disable ras on all IPs here before ip [hw/sw] fini */
	if (AMDGPU_RAS_GET_FEATURES(con->features))
		amdgpu_ras_disable_all_features(adev, 0);
	amdgpu_ras_recovery_fini(adev);
	return 0;
}

int amdgpu_ras_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras_block_list *ras_node, *tmp;
	struct amdgpu_ras_block_object *obj = NULL;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return 0;

	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
		if (ras_node->ras_obj) {
			obj = ras_node->ras_obj;
			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
			    obj->ras_fini)
				obj->ras_fini(adev, &obj->ras_comm);
			else
				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
		}

		/* Clear ras blocks from ras_list and free the ras block list node */
		list_del(&ras_node->node);
		kfree(ras_node);
	}

	amdgpu_ras_fs_fini(adev);
	amdgpu_ras_interrupt_remove_all(adev);

	if (amdgpu_ras_aca_is_supported(adev)) {
		if (amdgpu_aca_is_enabled(adev))
			amdgpu_aca_fini(adev);
		else
			amdgpu_mca_fini(adev);
	}

	WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");

	if (AMDGPU_RAS_GET_FEATURES(con->features))
		amdgpu_ras_disable_all_features(adev, 0);

	cancel_delayed_work_sync(&con->ras_counte_delay_work);

	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return 0;
}

bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras;

	ras = amdgpu_ras_get_context(adev);
	if (!ras)
		return false;

	return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
}

void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
{
	struct amdgpu_ras *ras;

	ras = amdgpu_ras_get_context(adev);
	if (ras) {
		if (status)
			set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
		else
			clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
	}
}

void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras;

	ras = amdgpu_ras_get_context(adev);
	if (ras)
		ras->ras_err_state = 0;
}

void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
			       enum amdgpu_ras_block block)
{
	struct amdgpu_ras *ras;

	ras = amdgpu_ras_get_context(adev);
	if (ras)
		set_bit(block, &ras->ras_err_state);
}

bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
{
	struct amdgpu_ras *ras;

	ras = amdgpu_ras_get_context(adev);
	if (ras) {
		if (block == AMDGPU_RAS_BLOCK__ANY)
			return (ras->ras_err_state != 0);
		else
			return test_bit(block, &ras->ras_err_state) ||
			       test_bit(AMDGPU_RAS_BLOCK__LAST,
					&ras->ras_err_state);
	}

	return false;
}
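/*
 * Illustrative usage (hypothetical): a submission path can cheaply test
 * whether a block, or the whole device (via the device-wide FED bit mapped
 * to AMDGPU_RAS_BLOCK__LAST), is in an error state before touching hardware.
 */
static bool __maybe_unused ras_example_umc_usable(struct amdgpu_device *adev)
{
	/* true when neither the UMC bit nor the FED bit is set */
	return !amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__UMC);
}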
static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras;

	ras = amdgpu_ras_get_context(adev);
	if (!ras)
		return NULL;

	return ras->event_mgr;
}

int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
				     const void *caller)
{
	struct ras_event_manager *event_mgr;
	struct ras_event_state *event_state;
	int ret = 0;

	if (type >= RAS_EVENT_TYPE_COUNT) {
		ret = -EINVAL;
		goto out;
	}

	event_mgr = __get_ras_event_mgr(adev);
	if (!event_mgr) {
		ret = -EINVAL;
		goto out;
	}

	event_state = &event_mgr->event_state[type];
	event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
	atomic64_inc(&event_state->count);

out:
	if (ret && caller)
		dev_warn(adev->dev, "failed to mark ras event (%d) in %ps, ret:%d\n",
			 (int)type, caller, ret);

	return ret;
}

u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
{
	struct ras_event_manager *event_mgr;
	u64 id;

	if (type >= RAS_EVENT_TYPE_COUNT)
		return RAS_EVENT_INVALID_ID;

	switch (type) {
	case RAS_EVENT_TYPE_FATAL:
	case RAS_EVENT_TYPE_POISON_CREATION:
	case RAS_EVENT_TYPE_POISON_CONSUMPTION:
		event_mgr = __get_ras_event_mgr(adev);
		if (!event_mgr)
			return RAS_EVENT_INVALID_ID;

		id = event_mgr->event_state[type].last_seqno;
		break;
	case RAS_EVENT_TYPE_INVALID:
	default:
		id = RAS_EVENT_INVALID_ID;
		break;
	}

	return id;
}

void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
		u64 event_id;

		if (amdgpu_ras_mark_ras_event(adev, type))
			return;

		event_id = amdgpu_ras_acquire_event_id(adev, type);

		RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error "
			      "(ERREVENT_ATHUB_INTERRUPT) detected!\n");

		amdgpu_ras_set_fed(adev, true);
		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
		amdgpu_ras_reset_gpu(adev);
	}
}

bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20 &&
	    adev->pm.fw_version <= 0x283400) {
		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
		       amdgpu_ras_intr_triggered();
	}

	return false;
}

void amdgpu_release_ras_context(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return;

	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
		amdgpu_ras_set_context(adev, NULL);
		kfree(con);
	}
}

#ifdef CONFIG_X86_MCE_AMD
static struct amdgpu_device *find_adev(uint32_t node_id)
{
	int i;
	struct amdgpu_device *adev = NULL;

	for (i = 0; i < mce_adev_list.num_gpu; i++) {
		adev = mce_adev_list.devs[i];

		if (adev && adev->gmc.xgmi.connected_to_cpu &&
		    adev->gmc.xgmi.physical_node_id == node_id)
			break;
		adev = NULL;
	}

	return adev;
}

#define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
#define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
#define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
#define GPU_ID_OFFSET		8

static int amdgpu_bad_page_notifier(struct notifier_block *nb,
				    unsigned long val, void *data)
{
	struct mce *m = (struct mce *)data;
	struct amdgpu_device *adev = NULL;
	uint32_t gpu_id = 0;
	uint32_t umc_inst = 0, ch_inst = 0;

	/*
	 * If the error was generated in UMC_V2, which belongs to GPU UMCs,
	 * and the error occurred in DramECC (Extended error code = 0), only
	 * then process the error; otherwise bail out.
	 */
	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
		    (XEC(m->status, 0x3f) == 0x0)))
		return NOTIFY_DONE;

	/*
	 * If it is a correctable error, return.
	 */
	if (mce_is_correctable(m))
		return NOTIFY_OK;

	/*
	 * GPU Id is offset by GPU_ID_OFFSET in the MCA_IPID_UMC register.
	 */
	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;

	adev = find_adev(gpu_id);
	if (!adev) {
		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
			 gpu_id);
		return NOTIFY_DONE;
	}

	/*
	 * If it is an uncorrectable error, then find out the UMC instance
	 * and channel index.
	 */
	umc_inst = GET_UMC_INST(m->ipid);
	ch_inst = GET_CHAN_INDEX(m->ipid);

	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
		 umc_inst, ch_inst);

	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
		return NOTIFY_OK;
	else
		return NOTIFY_DONE;
}

static struct notifier_block amdgpu_bad_page_nb = {
	.notifier_call	= amdgpu_bad_page_notifier,
	.priority	= MCE_PRIO_UC,
};

static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
{
	/*
	 * Add the adev to the mce_adev_list.
	 * During mode2 reset, the amdgpu device is temporarily
	 * removed from the mgpu_info list, which can cause
	 * page retirement to fail.
	 * Use this list instead of mgpu_info to find the amdgpu
	 * device on which the UMC error was reported.
	 */
	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;

	/*
	 * Register the x86 notifier only once
	 * with the MCE subsystem.
	 */
	if (!notifier_registered) {
		mce_register_decode_chain(&amdgpu_bad_page_nb);
		notifier_registered = true;
	}
}
#endif
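#ifdef CONFIG_X86_MCE_AMD
/*
 * Worked example (illustrative only): decoding the fields that
 * amdgpu_bad_page_notifier() extracts from a raw MCA_IPID value. The value
 * passed in is hypothetical; the macros above do the actual bit slicing.
 */
static void __maybe_unused ras_example_decode_ipid(u64 ipid)
{
	u32 gpu_id = GET_MCA_IPID_GPUID(ipid) - GPU_ID_OFFSET;
	u32 umc_inst = GET_UMC_INST(ipid);
	u32 ch_inst = GET_CHAN_INDEX(ipid);

	pr_debug("gpu_id: %u, umc_inst: %u, ch_inst: %u\n",
		 gpu_id, umc_inst, ch_inst);
}
#endif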
struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
{
	if (!adev)
		return NULL;

	return adev->psp.ras_context.ras;
}

int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
{
	if (!adev)
		return -EINVAL;

	adev->psp.ras_context.ras = ras_con;
	return 0;
}

/* check if ras is supported on a block, say, sdma, gfx */
int amdgpu_ras_is_supported(struct amdgpu_device *adev,
			    unsigned int block)
{
	int ret = 0;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (block >= AMDGPU_RAS_BLOCK_COUNT)
		return 0;

	ret = ras && (adev->ras_enabled & (1 << block));

	/* For a special asic with mem ecc enabled but sram ecc not
	 * enabled, even if the ras block is not marked as supported in
	 * .ras_enabled: if the asic supports poison mode and the ras
	 * block has a ras configuration, the ras block can be considered
	 * to support the ras function.
	 */
	if (!ret &&
	    (block == AMDGPU_RAS_BLOCK__GFX ||
	     block == AMDGPU_RAS_BLOCK__SDMA ||
	     block == AMDGPU_RAS_BLOCK__VCN ||
	     block == AMDGPU_RAS_BLOCK__JPEG) &&
	    (amdgpu_ras_mask & (1 << block)) &&
	    amdgpu_ras_is_poison_mode_supported(adev) &&
	    amdgpu_ras_get_ras_block(adev, block, 0))
		ret = 1;

	return ret;
}

int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	/* mode1 reset is the only option for RMA status */
	if (amdgpu_ras_is_rma(adev)) {
		ras->gpu_reset_flags = 0;
		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
	}

	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
		int hive_ras_recovery = 0;

		if (hive) {
			hive_ras_recovery = atomic_read(&hive->ras_recovery);
			amdgpu_put_xgmi_hive(hive);
		}
		/* In the case of multiple GPUs, after a GPU has started
		 * resetting all GPUs on the hive, other GPUs do not need to
		 * trigger GPU reset again.
		 */
		if (!hive_ras_recovery)
			amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
		else
			atomic_set(&ras->in_recovery, 0);
	} else {
		flush_work(&ras->recovery_work);
		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
	}

	return 0;
}

int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (con) {
		ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
		if (!ret)
			con->is_aca_debug_mode = enable;
	}

	return ret;
}

int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (con) {
		if (amdgpu_aca_is_enabled(adev))
			ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
		else
			ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
		if (!ret)
			con->is_aca_debug_mode = enable;
	}

	return ret;
}

bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (!con)
		return false;

	if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
	    (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
		return con->is_aca_debug_mode;
	else
		return true;
}
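/*
 * Illustrative caller (hypothetical): consumers pick a query strategy from
 * amdgpu_ras_get_error_query_mode() below; under SRIOV this always resolves
 * to the virtualization telemetry path.
 */
static bool __maybe_unused ras_example_use_firmware_query(struct amdgpu_device *adev)
{
	unsigned int mode;

	if (!amdgpu_ras_get_error_query_mode(adev, &mode))
		return false;

	return mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
}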
bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
				     unsigned int *error_query_mode)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;

	if (!con) {
		*error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
		return false;
	}

	if (amdgpu_sriov_vf(adev)) {
		*error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
	} else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
		*error_query_mode =
			(con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
	} else {
		*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
	}

	return true;
}

/* Register each ip ras block into amdgpu ras */
int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
				  struct amdgpu_ras_block_object *ras_block_obj)
{
	struct amdgpu_ras_block_list *ras_node;
	if (!adev || !ras_block_obj)
		return -EINVAL;

	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
	if (!ras_node)
		return -ENOMEM;

	INIT_LIST_HEAD(&ras_node->node);
	ras_node->ras_obj = ras_block_obj;
	list_add_tail(&ras_node->node, &adev->ras_list);

	return 0;
}

void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
{
	if (!err_type_name)
		return;

	switch (err_type) {
	case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
		sprintf(err_type_name, "correctable");
		break;
	case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
		sprintf(err_type_name, "uncorrectable");
		break;
	default:
		sprintf(err_type_name, "unknown");
		break;
	}
}

bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
					 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
					 uint32_t instance,
					 uint32_t *memory_id)
{
	uint32_t err_status_lo_data, err_status_lo_offset;

	if (!reg_entry)
		return false;

	err_status_lo_offset =
		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
					    reg_entry->seg_lo, reg_entry->reg_lo);
	err_status_lo_data = RREG32(err_status_lo_offset);

	if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
	    !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
		return false;

	*memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);

	return true;
}

bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
				       uint32_t instance,
				       unsigned long *err_cnt)
{
	uint32_t err_status_hi_data, err_status_hi_offset;

	if (!reg_entry)
		return false;

	err_status_hi_offset =
		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
					    reg_entry->seg_hi, reg_entry->reg_hi);
	err_status_hi_data = RREG32(err_status_hi_offset);

	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
		/* keep the check here in case we need to refer to the result later */
		dev_dbg(adev->dev, "Invalid err_info field\n");

	/* read err count */
	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);

	return true;
}
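/*
 * Illustrative sketch (hypothetical values): an IP block describes its
 * error-status register pairs with a table like the one below and then hands
 * it to amdgpu_ras_inst_query_ras_error_count(). The register and segment
 * numbers here are made up and do not match any real IP.
 */
static const struct amdgpu_ras_err_status_reg_entry
ras_example_reg_list[] __maybe_unused = {
	{
		.hwip = 0,			/* hypothetical HWIP index */
		.seg_lo = 0, .reg_lo = 0x100,	/* made-up err_status_lo register */
		.seg_hi = 0, .reg_hi = 0x104,	/* made-up err_status_hi register */
		.flags = AMDGPU_RAS_ERR_STATUS_VALID | AMDGPU_RAS_ERR_INFO_VALID,
		.block_name = "example_block",
	},
};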
void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
					   uint32_t reg_list_size,
					   const struct amdgpu_ras_memory_id_entry *mem_list,
					   uint32_t mem_list_size,
					   uint32_t instance,
					   uint32_t err_type,
					   unsigned long *err_count)
{
	uint32_t memory_id;
	unsigned long err_cnt;
	char err_type_name[16];
	uint32_t i, j;

	for (i = 0; i < reg_list_size; i++) {
		/* query memory_id from err_status_lo */
		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
							 instance, &memory_id))
			continue;

		/* query err_cnt from err_status_hi */
		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
						       instance, &err_cnt) ||
		    !err_cnt)
			continue;

		*err_count += err_cnt;

		/* log the errors */
		amdgpu_ras_get_error_type_name(err_type, err_type_name);
		if (!mem_list) {
			/* memory_list is not supported */
			dev_info(adev->dev,
				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
				 err_cnt, err_type_name,
				 reg_list[i].block_name,
				 instance, memory_id);
		} else {
			for (j = 0; j < mem_list_size; j++) {
				if (memory_id == mem_list[j].memory_id) {
					dev_info(adev->dev,
						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
						 err_cnt, err_type_name,
						 reg_list[i].block_name,
						 instance, mem_list[j].name);
					break;
				}
			}
		}
	}
}

void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
					   uint32_t reg_list_size,
					   uint32_t instance)
{
	uint32_t err_status_lo_offset, err_status_hi_offset;
	uint32_t i;

	for (i = 0; i < reg_list_size; i++) {
		err_status_lo_offset =
			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
						    reg_list[i].seg_lo, reg_list[i].reg_lo);
		err_status_hi_offset =
			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
						    reg_list[i].seg_hi, reg_list[i].reg_hi);
		WREG32(err_status_lo_offset, 0);
		WREG32(err_status_hi_offset, 0);
	}
}

int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
{
	memset(err_data, 0, sizeof(*err_data));

	INIT_LIST_HEAD(&err_data->err_node_list);

	return 0;
}

static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
{
	if (!err_node)
		return;

	list_del(&err_node->node);
	kvfree(err_node);
}

void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
{
	struct ras_err_node *err_node, *tmp;

	list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
		amdgpu_ras_error_node_release(err_node);
}

static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
							     struct amdgpu_smuio_mcm_config_info *mcm_info)
{
	struct ras_err_node *err_node;
	struct amdgpu_smuio_mcm_config_info *ref_id;

	if (!err_data || !mcm_info)
		return NULL;

	for_each_ras_error(err_node, err_data) {
		ref_id = &err_node->err_info.mcm_info;

		if (mcm_info->socket_id == ref_id->socket_id &&
		    mcm_info->die_id == ref_id->die_id)
			return err_node;
	}

	return NULL;
}

static struct ras_err_node *amdgpu_ras_error_node_new(void)
{
	struct ras_err_node *err_node;

	err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
	if (!err_node)
		return NULL;

	INIT_LIST_HEAD(&err_node->node);

	return err_node;
}

static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
{
	struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
	struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
	struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
	struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;

	if (unlikely(infoa->socket_id != infob->socket_id))
		return infoa->socket_id - infob->socket_id;

	return infoa->die_id - infob->die_id;
}
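/*
 * Usage sketch (hypothetical): the typical lifecycle of a ras_err_data
 * aggregate as used by query paths elsewhere in this file: initialize it,
 * account per-die counts against a location key, then release the node list.
 */
static int __maybe_unused ras_example_account_ue(void)
{
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = 0,	/* hypothetical location */
		.die_id = 0,
	};
	struct ras_err_data err_data;
	int ret;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	/* attribute two uncorrectable errors to socket 0 / die 0 */
	ret = amdgpu_ras_error_statistic_ue_count(&err_data, &mcm_info, 2);

	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}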
static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
						      struct amdgpu_smuio_mcm_config_info *mcm_info)
{
	struct ras_err_node *err_node;

	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
	if (err_node)
		return &err_node->err_info;

	err_node = amdgpu_ras_error_node_new();
	if (!err_node)
		return NULL;

	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));

	err_data->err_list_count++;
	list_add_tail(&err_node->node, &err_data->err_node_list);
	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);

	return &err_node->err_info;
}

int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
					struct amdgpu_smuio_mcm_config_info *mcm_info,
					u64 count)
{
	struct ras_err_info *err_info;

	if (!err_data || !mcm_info)
		return -EINVAL;

	if (!count)
		return 0;

	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
	if (!err_info)
		return -EINVAL;

	err_info->ue_count += count;
	err_data->ue_count += count;

	return 0;
}

int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
					struct amdgpu_smuio_mcm_config_info *mcm_info,
					u64 count)
{
	struct ras_err_info *err_info;

	if (!err_data || !mcm_info)
		return -EINVAL;

	if (!count)
		return 0;

	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
	if (!err_info)
		return -EINVAL;

	err_info->ce_count += count;
	err_data->ce_count += count;

	return 0;
}

int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
					struct amdgpu_smuio_mcm_config_info *mcm_info,
					u64 count)
{
	struct ras_err_info *err_info;

	if (!err_data || !mcm_info)
		return -EINVAL;

	if (!count)
		return 0;

	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
	if (!err_info)
		return -EINVAL;

	err_info->de_count += count;
	err_data->de_count += count;

	return 0;
}

#define mmMP0_SMN_C2PMSG_92	0x1609C
#define mmMP0_SMN_C2PMSG_126	0x160BE
static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
						 u32 instance)
{
	u32 socket_id, aid_id, hbm_id;
	u32 fw_status;
	u32 boot_error;
	u64 reg_addr;

	/* The pattern for SMN addressing in other SOCs could be different
	 * from the one for aqua_vanjaram. We should revisit the code if the
	 * pattern changes. In such a case, replace the aqua_vanjaram
	 * implementation with a more common helper */
	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
		   aqua_vanjaram_encode_ext_smn_addressing(instance);
	fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);

	reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
		   aqua_vanjaram_encode_ext_smn_addressing(instance);
	boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);

	socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
	aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
	hbm_id = ((1 == AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error)) ? 0 : 1);
	if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
			 socket_id, aid_id, hbm_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
			 socket_id, aid_id, hbm_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
			 socket_id, aid_id, hbm_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
			 socket_id, aid_id, fw_status);
}

static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
					   u32 instance)
{
	u64 reg_addr;
	u32 reg_data;
	int retry_loop;

	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
		   aqua_vanjaram_encode_ext_smn_addressing(instance);

	for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
		reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
		if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
			return false;
		else
			msleep(1);
	}

	return true;
}

void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
{
	u32 i;

	for (i = 0; i < num_instances; i++) {
		if (amdgpu_ras_boot_error_detected(adev, i))
			amdgpu_ras_boot_time_error_reporting(adev, i);
	}
}

int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
	int ret = 0;

	mutex_lock(&con->page_rsv_lock);
	ret = amdgpu_vram_mgr_query_page_status(mgr, start);
	if (ret == -ENOENT)
		ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
	mutex_unlock(&con->page_rsv_lock);

	return ret;
}
void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
				const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (RAS_EVENT_ID_IS_VALID(event_id))
		dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
	else
		dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);

	va_end(args);
}

bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return false;

	return con->is_rma;
}
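/*
 * Usage sketch (hypothetical): emitting a log line correlated with the last
 * fatal event, mirroring what amdgpu_ras_global_ras_isr() does through
 * RAS_EVENT_LOG.
 */
static void __maybe_unused ras_example_log_fatal(struct amdgpu_device *adev)
{
	u64 event_id = amdgpu_ras_acquire_event_id(adev, RAS_EVENT_TYPE_FATAL);

	/* an invalid id simply drops the "{seqno}" prefix in the output */
	amdgpu_ras_event_log_print(adev, event_id, "example fatal event\n");
}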