/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include <linux/list_sort.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
	"ih",
	"mpio",
};

const char *ras_mca_block_string[] = {
	"mca_mp0",
	"mca_mp1",
	"mca_mpio",
	"mca_iohc",
};

struct amdgpu_ras_block_list {
	/* ras block link */
	struct list_head node;

	struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (!ras_block)
		return "NULL";

	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
	    ras_block->block >= ARRAY_SIZE(ras_block_string))
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER	(100 * 1024 * 1024ULL)

#define MAX_UMC_POISON_POLLING_TIME_ASYNC	100  //ms

#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL		100  //ms

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
	int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data;
	struct eeprom_table_record err_rec;
	int ret;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
			 "RAS WARN: input address 0x%llx is invalid.\n",
			 address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
	err_data.err_addr = &err_rec;
	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

	amdgpu_ras_error_data_fini(&err_data);

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
				       size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
		     "ue", info.ue_count,
		     "ce", info.ce_count);
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_string[i]) == 0)
			return 0;
	}
	return -EINVAL;
}

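/*
 * Parse a command written to the ras_ctrl debugfs node.  Accepted text
 * commands (see the DOC section further below for details):
 *
 *   disable <block>
 *   enable <block> <error>
 *   inject <block> <error> <sub-block> <address> <value> [<instance_mask>]
 *   retire_page <address>
 *
 * A write that is not recognized as one of the text commands may instead be
 * a raw struct ras_debug_if, which is copied in directly.
 */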
static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;
	/* default value is 0 if the mask is not set by user */
	u32 instance_mask = 0;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but the command is not matched */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue, ce and poison errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else if (!memcmp("poison", err, 6))
			data->head.type = AMDGPU_RAS_ERROR__POISON;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
			data->inject.instance_mask = instance_mask;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}

static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
					   struct ras_debug_if *data)
{
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
	uint32_t mask, inst_mask = data->inject.instance_mask;

	/* no need to set instance mask if there is only one instance */
	if (num_xcc <= 1 && inst_mask) {
		data->inject.instance_mask = 0;
		dev_dbg(adev->dev,
			"RAS inject mask(0x%x) isn't supported and force it to 0.\n",
			inst_mask);

		return;
	}

	switch (data->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		mask = GENMASK(num_xcc - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		mask = GENMASK(adev->sdma.num_instances - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__VCN:
	case AMDGPU_RAS_BLOCK__JPEG:
		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
		break;
	default:
		mask = inst_mask;
		break;
	}

	/* remove invalid bits in instance mask */
	data->inject.instance_mask &= mask;
	if (inst_mask != data->inject.instance_mask)
		dev_dbg(adev->dev,
			"Adjust RAS inject mask 0x%x to 0x%x\n",
			inst_mask, data->inject.instance_mask);
}

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index and name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head: address, value and mask.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card which you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *	see ras_block_string[] for details
 *
 * The error type is one of: ue, ce and poison, where
 *	ue is multi-uncorrectable
 *	ce is single-correctable
 *	poison is poison
 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 * The mask is the instance mask; it is optional, default value is 0x1.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
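 *
 * A minimal userspace sketch of the programmatic path (illustrative only;
 * it assumes the struct ras_debug_if definition and the RAS block/error
 * enums have been copied from this driver into the program, and it omits
 * error handling):
 *
 * .. code-block:: c
 *
 *	// hypothetical example: enable RAS UE reporting on the UMC block
 *	struct ras_debug_if data = { 0 };
 *	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *
 *	data.op = 1;	// 1 == enable
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *	write(fd, &data, sizeof(data));
 *	close(fd);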
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
					     const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev,
			 "RAS WARN: error injection currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size &&
		    adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev,
				 "RAS WARN: input address 0x%llx is invalid.",
				 data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev,
				 "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
				 data.inject.address);
			break;
		}

		amdgpu_ras_instance_mask_check(adev, &data);

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experience ECC errors in VRAM. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
					       const char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM.
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 *   [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *   ue: 0
 *   ce: 1
 *
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				  "ce", info.ce_count, "de", info.de_count);
	else
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				  "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0)) {
		list_del(&obj->node);
		amdgpu_ras_error_data_fini(&obj->err_data);
	}

	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
						 struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return NULL;

		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
	} else
		obj = &con->objs[head->block];

	/* already exist. return obj? */
	if (alive_obj(obj))
		return NULL;

	if (amdgpu_ras_error_data_init(&obj->err_data))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
					struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
				return NULL;

			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
		} else
			obj = &con->objs[head->block];

		if (alive_obj(obj))
			return obj;
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj))
				return obj;
		}
	}

	return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
				       struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware supports ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs to do
	 * the corresponding initialization.
	 * The IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
			      struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	/* For non-gfx ip, do not enable ras feature if it is not allowed */
	/* For gfx ip, regardless of feature support status, */
	/* Force issue enable or disable ras feature commands */
	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	/* Only enable gfx ras feature from host side */
	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_sriov_vf(adev) &&
	    !amdgpu_ras_intr_triggered()) {
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}

		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable":"disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			kfree(info);
			return ret;
		}

		kfree(info);
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);

	return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
				      struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: remove this WA in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						 "RAS INFO: %s setup object\n",
						 get_ras_block_str(head));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd. */
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* gfx block ras disable cmd must be sent to ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);

			/* clean gfx block ras features flag */
			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features &= ~BIT(head->block);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
					   bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp.
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
					  bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int i;
	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};

		if (i == AMDGPU_RAS_BLOCK__MCA)
			continue;

		if (bypass) {
			/*
			 * bypass psp. vbios enable ras for us.
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.type = default_ras_type,
			.sub_block_index = i,
		};

		if (bypass) {
			/*
			 * bypass psp. vbios enable ras for us.
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
					  enum amdgpu_ras_block block)
{
	if (!block_obj)
		return -EINVAL;

	if (block_obj->ras_comm.block == block)
		return 0;

	return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
					enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;

	if (block >= AMDGPU_RAS_BLOCK__LAST)
		return NULL;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_block_match) {
			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
				return obj;
		} else {
			if (amdgpu_ras_block_match_default(obj, block) == 0)
				return obj;
		}
	}

	return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/*
	 * choosing the right query method according to
	 * whether the smu supports query of error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
	} else if (!ret) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address)
			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
	}
}

static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
					      struct ras_manager *ras_mgr,
					      struct ras_err_data *err_data,
					      struct ras_query_context *qctx,
					      const char *blk_name,
					      bool is_ue,
					      bool is_de)
{
	struct amdgpu_smuio_mcm_config_info *mcm_info;
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;
	u64 event_id = qctx->event_id;

	if (is_ue) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			if (err_info->ue_count) {
				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
					      "%lld new uncorrectable hardware errors detected in %s block\n",
					      mcm_info->socket_id,
					      mcm_info->die_id,
					      err_info->ue_count,
					      blk_name);
			}
		}

		for_each_ras_error(err_node, &ras_mgr->err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
" 1078 "%lld uncorrectable hardware errors detected in total in %s block\n", 1079 mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name); 1080 } 1081 1082 } else { 1083 if (is_de) { 1084 for_each_ras_error(err_node, err_data) { 1085 err_info = &err_node->err_info; 1086 mcm_info = &err_info->mcm_info; 1087 if (err_info->de_count) { 1088 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " 1089 "%lld new deferred hardware errors detected in %s block\n", 1090 mcm_info->socket_id, 1091 mcm_info->die_id, 1092 err_info->de_count, 1093 blk_name); 1094 } 1095 } 1096 1097 for_each_ras_error(err_node, &ras_mgr->err_data) { 1098 err_info = &err_node->err_info; 1099 mcm_info = &err_info->mcm_info; 1100 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " 1101 "%lld deferred hardware errors detected in total in %s block\n", 1102 mcm_info->socket_id, mcm_info->die_id, 1103 err_info->de_count, blk_name); 1104 } 1105 } else { 1106 for_each_ras_error(err_node, err_data) { 1107 err_info = &err_node->err_info; 1108 mcm_info = &err_info->mcm_info; 1109 if (err_info->ce_count) { 1110 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " 1111 "%lld new correctable hardware errors detected in %s block\n", 1112 mcm_info->socket_id, 1113 mcm_info->die_id, 1114 err_info->ce_count, 1115 blk_name); 1116 } 1117 } 1118 1119 for_each_ras_error(err_node, &ras_mgr->err_data) { 1120 err_info = &err_node->err_info; 1121 mcm_info = &err_info->mcm_info; 1122 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " 1123 "%lld correctable hardware errors detected in total in %s block\n", 1124 mcm_info->socket_id, mcm_info->die_id, 1125 err_info->ce_count, blk_name); 1126 } 1127 } 1128 } 1129 } 1130 1131 static inline bool err_data_has_source_info(struct ras_err_data *data) 1132 { 1133 return !list_empty(&data->err_node_list); 1134 } 1135 1136 static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, 1137 struct ras_query_if *query_if, 1138 struct ras_err_data *err_data, 1139 struct ras_query_context *qctx) 1140 { 1141 struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head); 1142 const char *blk_name = get_ras_block_str(&query_if->head); 1143 u64 event_id = qctx->event_id; 1144 1145 if (err_data->ce_count) { 1146 if (err_data_has_source_info(err_data)) { 1147 amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx, 1148 blk_name, false, false); 1149 } else if (!adev->aid_mask && 1150 adev->smuio.funcs && 1151 adev->smuio.funcs->get_socket_id && 1152 adev->smuio.funcs->get_die_id) { 1153 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d " 1154 "%ld correctable hardware errors " 1155 "detected in %s block\n", 1156 adev->smuio.funcs->get_socket_id(adev), 1157 adev->smuio.funcs->get_die_id(adev), 1158 ras_mgr->err_data.ce_count, 1159 blk_name); 1160 } else { 1161 RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors " 1162 "detected in %s block\n", 1163 ras_mgr->err_data.ce_count, 1164 blk_name); 1165 } 1166 } 1167 1168 if (err_data->ue_count) { 1169 if (err_data_has_source_info(err_data)) { 1170 amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx, 1171 blk_name, true, false); 1172 } else if (!adev->aid_mask && 1173 adev->smuio.funcs && 1174 adev->smuio.funcs->get_socket_id && 1175 adev->smuio.funcs->get_die_id) { 1176 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d " 1177 "%ld uncorrectable hardware errors " 1178 "detected in %s block\n", 1179 adev->smuio.funcs->get_socket_id(adev), 1180 adev->smuio.funcs->get_die_id(adev), 1181 
				      ras_mgr->err_data.ue_count,
				      blk_name);
		} else {
			RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
				      "detected in %s block\n",
				      ras_mgr->err_data.ue_count,
				      blk_name);
		}
	}

	if (err_data->de_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
							  blk_name, false, true);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
				      "%ld deferred hardware errors "
				      "detected in %s block\n",
				      adev->smuio.funcs->get_socket_id(adev),
				      adev->smuio.funcs->get_die_id(adev),
				      ras_mgr->err_data.de_count,
				      blk_name);
		} else {
			RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
				      "detected in %s block\n",
				      ras_mgr->err_data.de_count,
				      blk_name);
		}
	}
}

static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (err_data_has_source_info(err_data)) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			amdgpu_ras_error_statistic_de_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->de_count);
			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ce_count);
			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ue_count);
		}
	} else {
		/* for legacy asic path which doesn't have error source info */
		obj->err_data.ue_count += err_data->ue_count;
		obj->err_data.ce_count += err_data->ce_count;
		obj->err_data.de_count += err_data->de_count;
	}
}

static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
	struct ras_common_if head;

	memset(&head, 0, sizeof(head));
	head.block = blk;

	return amdgpu_ras_find_obj(adev, &head);
}

int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
			const struct aca_info *aca_info, void *data)
{
	struct ras_manager *obj;

	/* in resume phase, no need to create aca fs node */
	if (adev->in_suspend || amdgpu_in_reset(adev))
		return 0;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
}

int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	amdgpu_aca_remove_handle(&obj->aca_handle);

	return 0;
}

static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
					 enum aca_error_type type, struct ras_err_data *err_data,
					 struct ras_query_context *qctx)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
}

ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
				  struct aca_handle *handle, char *buf, void *data)
{
	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count, "de", info.de_count);
}

static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
						struct ras_query_if *info,
						struct ras_err_data *err_data,
						struct ras_query_context *qctx,
						unsigned int error_query_mode)
{
	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
	struct amdgpu_ras_block_object *block_obj = NULL;
	int ret;

	if (blk == AMDGPU_RAS_BLOCK_COUNT)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
			amdgpu_ras_get_ecc_info(adev, err_data);
		} else {
			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
			if (!block_obj || !block_obj->hw_ops) {
				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
					     get_ras_block_str(&info->head));
				return -EINVAL;
			}

			if (block_obj->hw_ops->query_ras_error_count)
				block_obj->hw_ops->query_ras_error_count(adev, err_data);

			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
				if (block_obj->hw_ops->query_ras_error_status)
					block_obj->hw_ops->query_ras_error_status(adev);
			}
		}
	} else {
		if (amdgpu_aca_is_enabled(adev)) {
			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
			if (ret)
				return ret;

			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
			if (ret)
				return ret;

			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
			if (ret)
				return ret;
		} else {
			/* FIXME: add code to check return value later */
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
		}
	}

	return 0;
}

/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data;
	struct ras_query_context qctx;
	unsigned int error_query_mode;
	int ret;

	if (!obj)
		return -EINVAL;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
		return -EINVAL;

	memset(&qctx, 0, sizeof(qctx));
	qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ?
						    RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID);
	ret = amdgpu_ras_query_error_status_helper(adev, info,
						   &err_data,
						   &qctx,
						   error_query_mode);
	if (ret)
		goto out_fini_err_data;

	amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;
	info->de_count = obj->err_data.de_count;

	amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}

int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
				 enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
	struct amdgpu_hive_info *hive;
	int hive_ras_recovery = 0;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     ras_block_str(block));
		return -EOPNOTSUPP;
	}

	if (!amdgpu_ras_is_supported(adev, block) ||
	    !amdgpu_ras_get_aca_debug_mode(adev))
		return -EOPNOTSUPP;

	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		hive_ras_recovery = atomic_read(&hive->ras_recovery);
		amdgpu_put_xgmi_hive(hive);
	}

	/* skip ras error reset in gpu reset */
	if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
	    hive_ras_recovery) &&
	    ((smu_funcs && smu_funcs->set_debug_mode) ||
	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
		return -EOPNOTSUPP;

	if (block_obj->hw_ops->reset_ras_error_count)
		block_obj->hw_ops->reset_ras_error_count(adev);

	return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
				  enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
		return 0;

	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
		if (block_obj->hw_ops->reset_ras_error_status)
			block_obj->hw_ops->reset_ras_error_status(adev);
	}

	return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
			    struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = -EINVAL;
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
							info->head.block,
							info->head.sub_block_index);

	/* inject on guest isn't allowed, return success directly */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!obj)
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return -EINVAL;
	}

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	if (block_obj->hw_ops->ras_error_inject) {
		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
		else /* Special ras_error_inject is defined (e.g: xgmi) */
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
								  info->instance_mask);
	} else {
		/* default path */
		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);

	return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 for query success or if nothing needs to be done; otherwise return
 * an error on failure.
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	if (!query_info)
		/* do nothing if query_info is not specified */
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	*ce_count += query_info->ce_count;
	*ue_count += query_info->ue_count;

	/* some hardware/IP supports read to clear
	 * no need to explicitly reset the err status after the query call */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
	}

	return 0;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for
 * specific ip block; if info is NULL, then the query request is for
 * all the ip blocks that support query ras error counters/status
 *
 * If @ce_count or @ue_count is set, accumulate the corresponding
 * error counts into those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
				 unsigned long *ce_count,
				 unsigned long *ue_count,
				 struct ras_query_if *query_info)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	unsigned long ce, ue;
	int ret;

	if (!adev->ras_enabled || !con)
		return -EOPNOTSUPP;

	/* Don't count since no reporting.
	 */
	if (!ce_count && !ue_count)
		return 0;

	ce = 0;
	ue = 0;
	if (!query_info) {
		/* query all the ip blocks that support ras query interface */
		list_for_each_entry(obj, &con->head, node) {
			struct ras_query_if info = {
				.head = obj->head,
			};

			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
		}
	} else {
		/* query specific ip block */
		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
	}

	if (ret)
		return ret;

	if (ce_count)
		*ce_count = ce;

	if (ue_count)
		*ue_count = ue;

	return 0;
}
/* query/inject/cure end */


/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
				    struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and not able to be used.
 *
 * P: pending for reserve, this gpu page is marked as bad, will be reserved
 * in the next window of page_reserve.
 *
 * F: unable to reserve. this gpu page can't be reserved due to some reason.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
 *
 */

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
			       "0x%08x : 0x%08x : %1s\n",
			       bps[start].bp,
			       bps[start].size,
			       amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
					      struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}

static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
					     struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, version_attr);
	return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
}

static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
					    struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, schema_attr);
	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
}

static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
					     &con->badpages_attr.attr,
					     RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		&con->version_attr.attr,
		&con->schema_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	if (adev->dev->kobj.sd)
		sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
			    struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (amdgpu_aca_is_enabled(adev))
		return 0;

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
		 "%s_err_count", head->name);

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				    &obj->sysfs_attr.attr,
				    RAS_FS_NAME)) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
			    struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (amdgpu_aca_is_enabled(adev))
		return 0;

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
					     &obj->sysfs_attr.attr,
					     RAS_FS_NAME);
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_remove_bad_page_node(adev);

	amdgpu_ras_sysfs_remove_dev_attr_node(adev);

	return 0;
}
/* sysfs end */

/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover. However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo true > .../ras/auto_reboot
 *
 */
/* debugfs begin */
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *dir;

	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_ops);
	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
			   &con->bad_page_cnt_threshold);
	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_size_ops);
	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
						       S_IRUGO, dir, adev,
						       &amdgpu_ras_debugfs_eeprom_table_ops);
	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);

	/*
	 * After one uncorrectable error happens, GPU recovery will usually
	 * be scheduled. But due to the known problem of GPU recovery failing
	 * to bring the GPU back, the interface below provides a direct way
	 * for the user to reboot the system automatically in such a case when
	 * an ERREVENT_ATHUB_INTERRUPT is generated; the normal GPU recovery
	 * routine will never be called.
1883 */ 1884 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); 1885 1886 /* 1887 * User could set this not to clean up hardware's error count register 1888 * of RAS IPs during ras recovery. 1889 */ 1890 debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir, 1891 &con->disable_ras_err_cnt_harvest); 1892 return dir; 1893 } 1894 1895 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, 1896 struct ras_fs_if *head, 1897 struct dentry *dir) 1898 { 1899 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); 1900 1901 if (!obj || !dir) 1902 return; 1903 1904 get_obj(obj); 1905 1906 memcpy(obj->fs_data.debugfs_name, 1907 head->debugfs_name, 1908 sizeof(obj->fs_data.debugfs_name)); 1909 1910 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir, 1911 obj, &amdgpu_ras_debugfs_ops); 1912 } 1913 1914 static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev) 1915 { 1916 bool ret; 1917 1918 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 1919 case IP_VERSION(13, 0, 6): 1920 case IP_VERSION(13, 0, 14): 1921 ret = true; 1922 break; 1923 default: 1924 ret = false; 1925 break; 1926 } 1927 1928 return ret; 1929 } 1930 1931 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) 1932 { 1933 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1934 struct dentry *dir; 1935 struct ras_manager *obj; 1936 struct ras_fs_if fs_info; 1937 1938 /* 1939 * it won't be called in resume path, no need to check 1940 * suspend and gpu reset status 1941 */ 1942 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) 1943 return; 1944 1945 dir = amdgpu_ras_debugfs_create_ctrl_node(adev); 1946 1947 list_for_each_entry(obj, &con->head, node) { 1948 if (amdgpu_ras_is_supported(adev, obj->head.block) && 1949 (obj->attr_inuse == 1)) { 1950 sprintf(fs_info.debugfs_name, "%s_err_inject", 1951 get_ras_block_str(&obj->head)); 1952 fs_info.head = obj->head; 1953 amdgpu_ras_debugfs_create(adev, &fs_info, dir); 1954 } 1955 } 1956 1957 if (amdgpu_ras_aca_is_supported(adev)) { 1958 if (amdgpu_aca_is_enabled(adev)) 1959 amdgpu_aca_smu_debugfs_init(adev, dir); 1960 else 1961 amdgpu_mca_smu_debugfs_init(adev, dir); 1962 } 1963 } 1964 1965 /* debugfs end */ 1966 1967 /* ras fs */ 1968 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO, 1969 amdgpu_ras_sysfs_badpages_read, NULL, 0); 1970 static DEVICE_ATTR(features, S_IRUGO, 1971 amdgpu_ras_sysfs_features_read, NULL); 1972 static DEVICE_ATTR(version, 0444, 1973 amdgpu_ras_sysfs_version_show, NULL); 1974 static DEVICE_ATTR(schema, 0444, 1975 amdgpu_ras_sysfs_schema_show, NULL); 1976 static int amdgpu_ras_fs_init(struct amdgpu_device *adev) 1977 { 1978 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1979 struct attribute_group group = { 1980 .name = RAS_FS_NAME, 1981 }; 1982 struct attribute *attrs[] = { 1983 &con->features_attr.attr, 1984 &con->version_attr.attr, 1985 &con->schema_attr.attr, 1986 NULL 1987 }; 1988 struct bin_attribute *bin_attrs[] = { 1989 NULL, 1990 NULL, 1991 }; 1992 int r; 1993 1994 group.attrs = attrs; 1995 1996 /* add features entry */ 1997 con->features_attr = dev_attr_features; 1998 sysfs_attr_init(attrs[0]); 1999 2000 /* add version entry */ 2001 con->version_attr = dev_attr_version; 2002 sysfs_attr_init(attrs[1]); 2003 2004 /* add schema entry */ 2005 con->schema_attr = dev_attr_schema; 2006 sysfs_attr_init(attrs[2]); 2007 2008 if (amdgpu_bad_page_threshold != 0) { 2009 /* add bad_page_features entry */ 2010 bin_attr_gpu_vram_bad_pages.private = NULL; 2011 con->badpages_attr = 
bin_attr_gpu_vram_bad_pages; 2012 bin_attrs[0] = &con->badpages_attr; 2013 group.bin_attrs = bin_attrs; 2014 sysfs_bin_attr_init(bin_attrs[0]); 2015 } 2016 2017 r = sysfs_create_group(&adev->dev->kobj, &group); 2018 if (r) 2019 dev_err(adev->dev, "Failed to create RAS sysfs group!"); 2020 2021 return 0; 2022 } 2023 2024 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) 2025 { 2026 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2027 struct ras_manager *con_obj, *ip_obj, *tmp; 2028 2029 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 2030 list_for_each_entry_safe(con_obj, tmp, &con->head, node) { 2031 ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head); 2032 if (ip_obj) 2033 put_obj(ip_obj); 2034 } 2035 } 2036 2037 amdgpu_ras_sysfs_remove_all(adev); 2038 return 0; 2039 } 2040 /* ras fs end */ 2041 2042 /* ih begin */ 2043 2044 /* For the hardware that cannot enable bif ring for both ras_controller_irq 2045 * and ras_err_evnet_athub_irq ih cookies, the driver has to poll status 2046 * register to check whether the interrupt is triggered or not, and properly 2047 * ack the interrupt if it is there 2048 */ 2049 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev) 2050 { 2051 /* Fatal error events are handled on host side */ 2052 if (amdgpu_sriov_vf(adev)) 2053 return; 2054 2055 if (adev->nbio.ras && 2056 adev->nbio.ras->handle_ras_controller_intr_no_bifring) 2057 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev); 2058 2059 if (adev->nbio.ras && 2060 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring) 2061 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev); 2062 } 2063 2064 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj, 2065 struct amdgpu_iv_entry *entry) 2066 { 2067 bool poison_stat = false; 2068 struct amdgpu_device *adev = obj->adev; 2069 struct amdgpu_ras_block_object *block_obj = 2070 amdgpu_ras_get_ras_block(adev, obj->head.block, 0); 2071 2072 if (!block_obj) 2073 return; 2074 2075 /* both query_poison_status and handle_poison_consumption are optional, 2076 * but at least one of them should be implemented if we need poison 2077 * consumption handler 2078 */ 2079 if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) { 2080 poison_stat = block_obj->hw_ops->query_poison_status(adev); 2081 if (!poison_stat) { 2082 /* Not poison consumption interrupt, no need to handle it */ 2083 dev_info(adev->dev, "No RAS poison status in %s poison IH.\n", 2084 block_obj->ras_comm.name); 2085 2086 return; 2087 } 2088 } 2089 2090 amdgpu_umc_poison_handler(adev, obj->head.block, 0); 2091 2092 if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption) 2093 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev); 2094 2095 /* gpu reset is fallback for failed and default cases */ 2096 if (poison_stat) { 2097 dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n", 2098 block_obj->ras_comm.name); 2099 amdgpu_ras_reset_gpu(adev); 2100 } else { 2101 amdgpu_gfx_poison_consumption_handler(adev, entry); 2102 } 2103 } 2104 2105 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj, 2106 struct amdgpu_iv_entry *entry) 2107 { 2108 dev_info(obj->adev->dev, 2109 "Poison is created\n"); 2110 2111 if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) { 2112 struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev); 2113 2114 amdgpu_ras_put_poison_req(obj->adev, 2115 AMDGPU_RAS_BLOCK__UMC, 0, NULL, NULL, false); 2116 2117 
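		/*
		 * The poison request above only lands in the poison fifo;
		 * bumping page_retirement_req_cnt and waking
		 * page_retirement_wq below is what hands it to the
		 * umc_page_retirement kthread for processing.
		 */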
atomic_inc(&con->page_retirement_req_cnt); 2118 2119 wake_up(&con->page_retirement_wq); 2120 } 2121 } 2122 2123 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj, 2124 struct amdgpu_iv_entry *entry) 2125 { 2126 struct ras_ih_data *data = &obj->ih_data; 2127 struct ras_err_data err_data; 2128 int ret; 2129 2130 if (!data->cb) 2131 return; 2132 2133 ret = amdgpu_ras_error_data_init(&err_data); 2134 if (ret) 2135 return; 2136 2137 /* Let IP handle its data, maybe we need get the output 2138 * from the callback to update the error type/count, etc 2139 */ 2140 ret = data->cb(obj->adev, &err_data, entry); 2141 /* ue will trigger an interrupt, and in that case 2142 * we need do a reset to recovery the whole system. 2143 * But leave IP do that recovery, here we just dispatch 2144 * the error. 2145 */ 2146 if (ret == AMDGPU_RAS_SUCCESS) { 2147 /* these counts could be left as 0 if 2148 * some blocks do not count error number 2149 */ 2150 obj->err_data.ue_count += err_data.ue_count; 2151 obj->err_data.ce_count += err_data.ce_count; 2152 obj->err_data.de_count += err_data.de_count; 2153 } 2154 2155 amdgpu_ras_error_data_fini(&err_data); 2156 } 2157 2158 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) 2159 { 2160 struct ras_ih_data *data = &obj->ih_data; 2161 struct amdgpu_iv_entry entry; 2162 2163 while (data->rptr != data->wptr) { 2164 rmb(); 2165 memcpy(&entry, &data->ring[data->rptr], 2166 data->element_size); 2167 2168 wmb(); 2169 data->rptr = (data->aligned_element_size + 2170 data->rptr) % data->ring_size; 2171 2172 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) { 2173 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC) 2174 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry); 2175 else 2176 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry); 2177 } else { 2178 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC) 2179 amdgpu_ras_interrupt_umc_handler(obj, &entry); 2180 else 2181 dev_warn(obj->adev->dev, 2182 "No RAS interrupt handler for non-UMC block with poison disabled.\n"); 2183 } 2184 } 2185 } 2186 2187 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) 2188 { 2189 struct ras_ih_data *data = 2190 container_of(work, struct ras_ih_data, ih_work); 2191 struct ras_manager *obj = 2192 container_of(data, struct ras_manager, ih_data); 2193 2194 amdgpu_ras_interrupt_handler(obj); 2195 } 2196 2197 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, 2198 struct ras_dispatch_if *info) 2199 { 2200 struct ras_manager *obj; 2201 struct ras_ih_data *data; 2202 2203 obj = amdgpu_ras_find_obj(adev, &info->head); 2204 if (!obj) 2205 return -EINVAL; 2206 2207 data = &obj->ih_data; 2208 2209 if (data->inuse == 0) 2210 return 0; 2211 2212 /* Might be overflow... 
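	 * the ring only holds 64 aligned IV entries (sized in
	 * amdgpu_ras_interrupt_add_handler()) and there is no fullness check
	 * here, so a burst of more than 64 undrained entries will overwrite
	 * ones the worker has not consumed yet.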
*/ 2213 memcpy(&data->ring[data->wptr], info->entry, 2214 data->element_size); 2215 2216 wmb(); 2217 data->wptr = (data->aligned_element_size + 2218 data->wptr) % data->ring_size; 2219 2220 schedule_work(&data->ih_work); 2221 2222 return 0; 2223 } 2224 2225 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, 2226 struct ras_common_if *head) 2227 { 2228 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 2229 struct ras_ih_data *data; 2230 2231 if (!obj) 2232 return -EINVAL; 2233 2234 data = &obj->ih_data; 2235 if (data->inuse == 0) 2236 return 0; 2237 2238 cancel_work_sync(&data->ih_work); 2239 2240 kfree(data->ring); 2241 memset(data, 0, sizeof(*data)); 2242 put_obj(obj); 2243 2244 return 0; 2245 } 2246 2247 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev, 2248 struct ras_common_if *head) 2249 { 2250 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 2251 struct ras_ih_data *data; 2252 struct amdgpu_ras_block_object *ras_obj; 2253 2254 if (!obj) { 2255 /* in case we registe the IH before enable ras feature */ 2256 obj = amdgpu_ras_create_obj(adev, head); 2257 if (!obj) 2258 return -EINVAL; 2259 } else 2260 get_obj(obj); 2261 2262 ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm); 2263 2264 data = &obj->ih_data; 2265 /* add the callback.etc */ 2266 *data = (struct ras_ih_data) { 2267 .inuse = 0, 2268 .cb = ras_obj->ras_cb, 2269 .element_size = sizeof(struct amdgpu_iv_entry), 2270 .rptr = 0, 2271 .wptr = 0, 2272 }; 2273 2274 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler); 2275 2276 data->aligned_element_size = ALIGN(data->element_size, 8); 2277 /* the ring can store 64 iv entries. */ 2278 data->ring_size = 64 * data->aligned_element_size; 2279 data->ring = kmalloc(data->ring_size, GFP_KERNEL); 2280 if (!data->ring) { 2281 put_obj(obj); 2282 return -ENOMEM; 2283 } 2284 2285 /* IH is ready */ 2286 data->inuse = 1; 2287 2288 return 0; 2289 } 2290 2291 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev) 2292 { 2293 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2294 struct ras_manager *obj, *tmp; 2295 2296 list_for_each_entry_safe(obj, tmp, &con->head, node) { 2297 amdgpu_ras_interrupt_remove_handler(adev, &obj->head); 2298 } 2299 2300 return 0; 2301 } 2302 /* ih end */ 2303 2304 /* traversal all IPs except NBIO to query error counter */ 2305 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) 2306 { 2307 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2308 struct ras_manager *obj; 2309 2310 if (!adev->ras_enabled || !con) 2311 return; 2312 2313 list_for_each_entry(obj, &con->head, node) { 2314 struct ras_query_if info = { 2315 .head = obj->head, 2316 }; 2317 2318 /* 2319 * PCIE_BIF IP has one different isr by ras controller 2320 * interrupt, the specific ras counter query will be 2321 * done in that isr. So skip such block from common 2322 * sync flood interrupt isr calling. 2323 */ 2324 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF) 2325 continue; 2326 2327 /* 2328 * this is a workaround for aldebaran, skip send msg to 2329 * smu to get ecc_info table due to smu handle get ecc 2330 * info table failed temporarily. 2331 * should be removed until smu fix handle ecc_info table. 
2332 */ 2333 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) && 2334 (amdgpu_ip_version(adev, MP1_HWIP, 0) == 2335 IP_VERSION(13, 0, 2))) 2336 continue; 2337 2338 amdgpu_ras_query_error_status(adev, &info); 2339 2340 if (amdgpu_ip_version(adev, MP0_HWIP, 0) != 2341 IP_VERSION(11, 0, 2) && 2342 amdgpu_ip_version(adev, MP0_HWIP, 0) != 2343 IP_VERSION(11, 0, 4) && 2344 amdgpu_ip_version(adev, MP0_HWIP, 0) != 2345 IP_VERSION(13, 0, 0)) { 2346 if (amdgpu_ras_reset_error_status(adev, info.head.block)) 2347 dev_warn(adev->dev, "Failed to reset error counter and error status"); 2348 } 2349 } 2350 } 2351 2352 /* Parse RdRspStatus and WrRspStatus */ 2353 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, 2354 struct ras_query_if *info) 2355 { 2356 struct amdgpu_ras_block_object *block_obj; 2357 /* 2358 * Only two block need to query read/write 2359 * RspStatus at current state 2360 */ 2361 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) && 2362 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB)) 2363 return; 2364 2365 block_obj = amdgpu_ras_get_ras_block(adev, 2366 info->head.block, 2367 info->head.sub_block_index); 2368 2369 if (!block_obj || !block_obj->hw_ops) { 2370 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", 2371 get_ras_block_str(&info->head)); 2372 return; 2373 } 2374 2375 if (block_obj->hw_ops->query_ras_error_status) 2376 block_obj->hw_ops->query_ras_error_status(adev); 2377 2378 } 2379 2380 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev) 2381 { 2382 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2383 struct ras_manager *obj; 2384 2385 if (!adev->ras_enabled || !con) 2386 return; 2387 2388 list_for_each_entry(obj, &con->head, node) { 2389 struct ras_query_if info = { 2390 .head = obj->head, 2391 }; 2392 2393 amdgpu_ras_error_status_query(adev, &info); 2394 } 2395 } 2396 2397 /* recovery begin */ 2398 2399 /* return 0 on success. 2400 * caller need free bps. 
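 *
 * A minimal caller sketch (this mirrors how amdgpu_ras_sysfs_badpages_read()
 * consumes the list):
 *
 *	struct ras_badpage *bps = NULL;
 *	unsigned int count = 0;
 *
 *	if (!amdgpu_ras_badpages_read(adev, &bps, &count)) {
 *		... walk bps[0] .. bps[count - 1], then release the array ...
 *		kfree(bps);
 *	}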
2401 */ 2402 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, 2403 struct ras_badpage **bps, unsigned int *count) 2404 { 2405 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2406 struct ras_err_handler_data *data; 2407 int i = 0; 2408 int ret = 0, status; 2409 2410 if (!con || !con->eh_data || !bps || !count) 2411 return -EINVAL; 2412 2413 mutex_lock(&con->recovery_lock); 2414 data = con->eh_data; 2415 if (!data || data->count == 0) { 2416 *bps = NULL; 2417 ret = -EINVAL; 2418 goto out; 2419 } 2420 2421 *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL); 2422 if (!*bps) { 2423 ret = -ENOMEM; 2424 goto out; 2425 } 2426 2427 for (; i < data->count; i++) { 2428 (*bps)[i] = (struct ras_badpage){ 2429 .bp = data->bps[i].retired_page, 2430 .size = AMDGPU_GPU_PAGE_SIZE, 2431 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, 2432 }; 2433 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, 2434 data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT); 2435 if (status == -EBUSY) 2436 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; 2437 else if (status == -ENOENT) 2438 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; 2439 } 2440 2441 *count = data->count; 2442 out: 2443 mutex_unlock(&con->recovery_lock); 2444 return ret; 2445 } 2446 2447 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev, 2448 struct amdgpu_hive_info *hive, bool status) 2449 { 2450 struct amdgpu_device *tmp_adev; 2451 2452 if (hive) { 2453 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) 2454 amdgpu_ras_set_fed(tmp_adev, status); 2455 } else { 2456 amdgpu_ras_set_fed(adev, status); 2457 } 2458 } 2459 2460 static void amdgpu_ras_do_recovery(struct work_struct *work) 2461 { 2462 struct amdgpu_ras *ras = 2463 container_of(work, struct amdgpu_ras, recovery_work); 2464 struct amdgpu_device *remote_adev = NULL; 2465 struct amdgpu_device *adev = ras->adev; 2466 struct list_head device_list, *device_list_handle = NULL; 2467 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 2468 2469 if (hive) { 2470 atomic_set(&hive->ras_recovery, 1); 2471 2472 /* If any device which is part of the hive received RAS fatal 2473 * error interrupt, set fatal error status on all. This 2474 * condition will need a recovery, and flag will be cleared 2475 * as part of recovery. 
2476 */ 2477 list_for_each_entry(remote_adev, &hive->device_list, 2478 gmc.xgmi.head) 2479 if (amdgpu_ras_get_fed_status(remote_adev)) { 2480 amdgpu_ras_set_fed_all(adev, hive, true); 2481 break; 2482 } 2483 } 2484 if (!ras->disable_ras_err_cnt_harvest) { 2485 2486 /* Build list of devices to query RAS related errors */ 2487 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) { 2488 device_list_handle = &hive->device_list; 2489 } else { 2490 INIT_LIST_HEAD(&device_list); 2491 list_add_tail(&adev->gmc.xgmi.head, &device_list); 2492 device_list_handle = &device_list; 2493 } 2494 2495 list_for_each_entry(remote_adev, 2496 device_list_handle, gmc.xgmi.head) { 2497 amdgpu_ras_query_err_status(remote_adev); 2498 amdgpu_ras_log_on_err_counter(remote_adev); 2499 } 2500 2501 } 2502 2503 if (amdgpu_device_should_recover_gpu(ras->adev)) { 2504 struct amdgpu_reset_context reset_context; 2505 memset(&reset_context, 0, sizeof(reset_context)); 2506 2507 reset_context.method = AMD_RESET_METHOD_NONE; 2508 reset_context.reset_req_dev = adev; 2509 reset_context.src = AMDGPU_RESET_SRC_RAS; 2510 2511 /* Perform full reset in fatal error mode */ 2512 if (!amdgpu_ras_is_poison_mode_supported(ras->adev)) 2513 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 2514 else { 2515 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 2516 2517 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) { 2518 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET; 2519 reset_context.method = AMD_RESET_METHOD_MODE2; 2520 } 2521 2522 /* Fatal error occurs in poison mode, mode1 reset is used to 2523 * recover gpu. 2524 */ 2525 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) { 2526 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET; 2527 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 2528 2529 psp_fatal_error_recovery_quirk(&adev->psp); 2530 } 2531 } 2532 2533 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context); 2534 } 2535 atomic_set(&ras->in_recovery, 0); 2536 if (hive) { 2537 atomic_set(&hive->ras_recovery, 0); 2538 amdgpu_put_xgmi_hive(hive); 2539 } 2540 } 2541 2542 /* alloc/realloc bps array */ 2543 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, 2544 struct ras_err_handler_data *data, int pages) 2545 { 2546 unsigned int old_space = data->count + data->space_left; 2547 unsigned int new_space = old_space + pages; 2548 unsigned int align_space = ALIGN(new_space, 512); 2549 void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); 2550 2551 if (!bps) { 2552 return -ENOMEM; 2553 } 2554 2555 if (data->bps) { 2556 memcpy(bps, data->bps, 2557 data->count * sizeof(*data->bps)); 2558 kfree(data->bps); 2559 } 2560 2561 data->bps = bps; 2562 data->space_left += align_space - old_space; 2563 return 0; 2564 } 2565 2566 /* it deal with vram only. 
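 * Pages already recorded as bad are skipped, newly added ones are
 * reserved via amdgpu_ras_reserve_page(), and the eh_data bps array is
 * grown on demand when it runs out of space.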
*/ 2567 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, 2568 struct eeprom_table_record *bps, int pages) 2569 { 2570 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2571 struct ras_err_handler_data *data; 2572 int ret = 0; 2573 uint32_t i; 2574 2575 if (!con || !con->eh_data || !bps || pages <= 0) 2576 return 0; 2577 2578 mutex_lock(&con->recovery_lock); 2579 data = con->eh_data; 2580 if (!data) 2581 goto out; 2582 2583 for (i = 0; i < pages; i++) { 2584 if (amdgpu_ras_check_bad_page_unlock(con, 2585 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) 2586 continue; 2587 2588 if (!data->space_left && 2589 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) { 2590 ret = -ENOMEM; 2591 goto out; 2592 } 2593 2594 amdgpu_ras_reserve_page(adev, bps[i].retired_page); 2595 2596 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps)); 2597 data->count++; 2598 data->space_left--; 2599 } 2600 out: 2601 mutex_unlock(&con->recovery_lock); 2602 2603 return ret; 2604 } 2605 2606 /* 2607 * write error record array to eeprom, the function should be 2608 * protected by recovery_lock 2609 * new_cnt: new added UE count, excluding reserved bad pages, can be NULL 2610 */ 2611 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev, 2612 unsigned long *new_cnt) 2613 { 2614 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2615 struct ras_err_handler_data *data; 2616 struct amdgpu_ras_eeprom_control *control; 2617 int save_count; 2618 2619 if (!con || !con->eh_data) { 2620 if (new_cnt) 2621 *new_cnt = 0; 2622 2623 return 0; 2624 } 2625 2626 mutex_lock(&con->recovery_lock); 2627 control = &con->eeprom_control; 2628 data = con->eh_data; 2629 save_count = data->count - control->ras_num_recs; 2630 mutex_unlock(&con->recovery_lock); 2631 2632 if (new_cnt) 2633 *new_cnt = save_count / adev->umc.retire_unit; 2634 2635 /* only new entries are saved */ 2636 if (save_count > 0) { 2637 if (amdgpu_ras_eeprom_append(control, 2638 &data->bps[control->ras_num_recs], 2639 save_count)) { 2640 dev_err(adev->dev, "Failed to save EEPROM table data!"); 2641 return -EIO; 2642 } 2643 2644 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count); 2645 } 2646 2647 return 0; 2648 } 2649 2650 /* 2651 * read error record array in eeprom and reserve enough space for 2652 * storing new bad pages 2653 */ 2654 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) 2655 { 2656 struct amdgpu_ras_eeprom_control *control = 2657 &adev->psp.ras_context.ras->eeprom_control; 2658 struct eeprom_table_record *bps; 2659 int ret; 2660 2661 /* no bad page record, skip eeprom access */ 2662 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0) 2663 return 0; 2664 2665 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL); 2666 if (!bps) 2667 return -ENOMEM; 2668 2669 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs); 2670 if (ret) 2671 dev_err(adev->dev, "Failed to load EEPROM table records!"); 2672 else 2673 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs); 2674 2675 kfree(bps); 2676 return ret; 2677 } 2678 2679 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, 2680 uint64_t addr) 2681 { 2682 struct ras_err_handler_data *data = con->eh_data; 2683 int i; 2684 2685 addr >>= AMDGPU_GPU_PAGE_SHIFT; 2686 for (i = 0; i < data->count; i++) 2687 if (addr == data->bps[i].retired_page) 2688 return true; 2689 2690 return false; 2691 } 2692 2693 /* 2694 * check if an address belongs to bad page 2695 * 2696 * Note: this check is only for umc block 2697 */ 
2698 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, 2699 uint64_t addr) 2700 { 2701 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2702 bool ret = false; 2703 2704 if (!con || !con->eh_data) 2705 return ret; 2706 2707 mutex_lock(&con->recovery_lock); 2708 ret = amdgpu_ras_check_bad_page_unlock(con, addr); 2709 mutex_unlock(&con->recovery_lock); 2710 return ret; 2711 } 2712 2713 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev, 2714 uint32_t max_count) 2715 { 2716 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2717 2718 /* 2719 * Justification of value bad_page_cnt_threshold in ras structure 2720 * 2721 * Generally, 0 <= amdgpu_bad_page_threshold <= max record length 2722 * in eeprom or amdgpu_bad_page_threshold == -2, introduce two 2723 * scenarios accordingly. 2724 * 2725 * Bad page retirement enablement: 2726 * - If amdgpu_bad_page_threshold = -2, 2727 * bad_page_cnt_threshold = typical value by formula. 2728 * 2729 * - When the value from user is 0 < amdgpu_bad_page_threshold < 2730 * max record length in eeprom, use it directly. 2731 * 2732 * Bad page retirement disablement: 2733 * - If amdgpu_bad_page_threshold = 0, bad page retirement 2734 * functionality is disabled, and bad_page_cnt_threshold will 2735 * take no effect. 2736 */ 2737 2738 if (amdgpu_bad_page_threshold < 0) { 2739 u64 val = adev->gmc.mc_vram_size; 2740 2741 do_div(val, RAS_BAD_PAGE_COVER); 2742 con->bad_page_cnt_threshold = min(lower_32_bits(val), 2743 max_count); 2744 } else { 2745 con->bad_page_cnt_threshold = min_t(int, max_count, 2746 amdgpu_bad_page_threshold); 2747 } 2748 } 2749 2750 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev, 2751 enum amdgpu_ras_block block, uint16_t pasid, 2752 pasid_notify pasid_fn, void *data, uint32_t reset) 2753 { 2754 int ret = 0; 2755 struct ras_poison_msg poison_msg; 2756 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2757 2758 memset(&poison_msg, 0, sizeof(poison_msg)); 2759 poison_msg.block = block; 2760 poison_msg.pasid = pasid; 2761 poison_msg.reset = reset; 2762 poison_msg.pasid_fn = pasid_fn; 2763 poison_msg.data = data; 2764 2765 ret = kfifo_put(&con->poison_fifo, poison_msg); 2766 if (!ret) { 2767 dev_err(adev->dev, "Poison message fifo is full!\n"); 2768 return -ENOSPC; 2769 } 2770 2771 return 0; 2772 } 2773 2774 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev, 2775 struct ras_poison_msg *poison_msg) 2776 { 2777 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2778 2779 return kfifo_get(&con->poison_fifo, poison_msg); 2780 } 2781 2782 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log) 2783 { 2784 mutex_init(&ecc_log->lock); 2785 2786 /* Set any value as siphash key */ 2787 memset(&ecc_log->ecc_key, 0xad, sizeof(ecc_log->ecc_key)); 2788 2789 INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL); 2790 ecc_log->de_updated = false; 2791 } 2792 2793 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log) 2794 { 2795 struct radix_tree_iter iter; 2796 void __rcu **slot; 2797 struct ras_ecc_err *ecc_err; 2798 2799 mutex_lock(&ecc_log->lock); 2800 radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) { 2801 ecc_err = radix_tree_deref_slot(slot); 2802 kfree(ecc_err->err_pages.pfn); 2803 kfree(ecc_err); 2804 radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot); 2805 } 2806 mutex_unlock(&ecc_log->lock); 2807 2808 mutex_destroy(&ecc_log->lock); 2809 ecc_log->de_updated = false; 2810 } 2811 2812 static void amdgpu_ras_do_page_retirement(struct 
work_struct *work) 2813 { 2814 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, 2815 page_retirement_dwork.work); 2816 struct amdgpu_device *adev = con->adev; 2817 struct ras_err_data err_data; 2818 2819 if (amdgpu_in_reset(adev) || atomic_read(&con->in_recovery)) 2820 return; 2821 2822 amdgpu_ras_error_data_init(&err_data); 2823 2824 amdgpu_umc_handle_bad_pages(adev, &err_data); 2825 2826 amdgpu_ras_error_data_fini(&err_data); 2827 2828 mutex_lock(&con->umc_ecc_log.lock); 2829 if (radix_tree_tagged(&con->umc_ecc_log.de_page_tree, 2830 UMC_ECC_NEW_DETECTED_TAG)) 2831 schedule_delayed_work(&con->page_retirement_dwork, 2832 msecs_to_jiffies(AMDGPU_RAS_RETIRE_PAGE_INTERVAL)); 2833 mutex_unlock(&con->umc_ecc_log.lock); 2834 } 2835 2836 static void amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev, 2837 uint32_t timeout_ms) 2838 { 2839 int ret = 0; 2840 struct ras_ecc_log_info *ecc_log; 2841 struct ras_query_if info; 2842 uint32_t timeout = timeout_ms; 2843 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 2844 2845 memset(&info, 0, sizeof(info)); 2846 info.head.block = AMDGPU_RAS_BLOCK__UMC; 2847 2848 ecc_log = &ras->umc_ecc_log; 2849 ecc_log->de_updated = false; 2850 do { 2851 ret = amdgpu_ras_query_error_status(adev, &info); 2852 if (ret) { 2853 dev_err(adev->dev, "Failed to query ras error! ret:%d\n", ret); 2854 return; 2855 } 2856 2857 if (timeout && !ecc_log->de_updated) { 2858 msleep(1); 2859 timeout--; 2860 } 2861 } while (timeout && !ecc_log->de_updated); 2862 2863 if (timeout_ms && !timeout) { 2864 dev_warn(adev->dev, "Can't find deferred error\n"); 2865 return; 2866 } 2867 2868 if (!ret) 2869 schedule_delayed_work(&ras->page_retirement_dwork, 0); 2870 } 2871 2872 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev, 2873 struct ras_poison_msg *poison_msg) 2874 { 2875 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2876 uint32_t reset = poison_msg->reset; 2877 uint16_t pasid = poison_msg->pasid; 2878 2879 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); 2880 2881 if (poison_msg->pasid_fn) 2882 poison_msg->pasid_fn(adev, pasid, poison_msg->data); 2883 2884 if (reset) { 2885 flush_delayed_work(&con->page_retirement_dwork); 2886 2887 con->gpu_reset_flags |= reset; 2888 amdgpu_ras_reset_gpu(adev); 2889 } 2890 2891 return 0; 2892 } 2893 2894 static int amdgpu_ras_page_retirement_thread(void *param) 2895 { 2896 struct amdgpu_device *adev = (struct amdgpu_device *)param; 2897 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2898 struct ras_poison_msg poison_msg; 2899 enum amdgpu_ras_block ras_block; 2900 bool poison_creation_is_handled = false; 2901 2902 while (!kthread_should_stop()) { 2903 2904 wait_event_interruptible(con->page_retirement_wq, 2905 kthread_should_stop() || 2906 atomic_read(&con->page_retirement_req_cnt)); 2907 2908 if (kthread_should_stop()) 2909 break; 2910 2911 atomic_dec(&con->page_retirement_req_cnt); 2912 2913 if (!amdgpu_ras_get_poison_req(adev, &poison_msg)) 2914 continue; 2915 2916 ras_block = poison_msg.block; 2917 2918 dev_dbg(adev->dev, "Start processing ras block %s(%d)\n", 2919 ras_block_str(ras_block), ras_block); 2920 2921 if (ras_block == AMDGPU_RAS_BLOCK__UMC) { 2922 amdgpu_ras_poison_creation_handler(adev, 2923 MAX_UMC_POISON_POLLING_TIME_ASYNC); 2924 poison_creation_is_handled = true; 2925 } else { 2926 /* poison_creation_is_handled: 2927 * false: no poison creation interrupt, but it has poison 2928 * consumption interrupt. 
2929 * true: It has poison creation interrupt at the beginning, 2930 * but it has no poison creation interrupt later. 2931 */ 2932 amdgpu_ras_poison_creation_handler(adev, 2933 poison_creation_is_handled ? 2934 0 : MAX_UMC_POISON_POLLING_TIME_ASYNC); 2935 2936 amdgpu_ras_poison_consumption_handler(adev, &poison_msg); 2937 poison_creation_is_handled = false; 2938 } 2939 } 2940 2941 return 0; 2942 } 2943 2944 int amdgpu_ras_recovery_init(struct amdgpu_device *adev) 2945 { 2946 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2947 struct ras_err_handler_data **data; 2948 u32 max_eeprom_records_count = 0; 2949 int ret; 2950 2951 if (!con || amdgpu_sriov_vf(adev)) 2952 return 0; 2953 2954 /* Allow access to RAS EEPROM via debugfs, when the ASIC 2955 * supports RAS and debugfs is enabled, but when 2956 * adev->ras_enabled is unset, i.e. when "ras_enable" 2957 * module parameter is set to 0. 2958 */ 2959 con->adev = adev; 2960 2961 if (!adev->ras_enabled) 2962 return 0; 2963 2964 data = &con->eh_data; 2965 *data = kzalloc(sizeof(**data), GFP_KERNEL); 2966 if (!*data) { 2967 ret = -ENOMEM; 2968 goto out; 2969 } 2970 2971 mutex_init(&con->recovery_lock); 2972 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); 2973 atomic_set(&con->in_recovery, 0); 2974 con->eeprom_control.bad_channel_bitmap = 0; 2975 2976 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); 2977 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count); 2978 2979 /* Todo: During test the SMU might fail to read the eeprom through I2C 2980 * when the GPU is pending on XGMI reset during probe time 2981 * (Mostly after second bus reset), skip it now 2982 */ 2983 if (adev->gmc.xgmi.pending_reset) 2984 return 0; 2985 ret = amdgpu_ras_eeprom_init(&con->eeprom_control); 2986 /* 2987 * This calling fails when is_rma is true or 2988 * ret != 0. 2989 */ 2990 if (con->is_rma || ret) 2991 goto free; 2992 2993 if (con->eeprom_control.ras_num_recs) { 2994 ret = amdgpu_ras_load_bad_pages(adev); 2995 if (ret) 2996 goto free; 2997 2998 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs); 2999 3000 if (con->update_channel_flag == true) { 3001 amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap); 3002 con->update_channel_flag = false; 3003 } 3004 } 3005 3006 mutex_init(&con->page_rsv_lock); 3007 INIT_KFIFO(con->poison_fifo); 3008 mutex_init(&con->page_retirement_lock); 3009 init_waitqueue_head(&con->page_retirement_wq); 3010 atomic_set(&con->page_retirement_req_cnt, 0); 3011 con->page_retirement_thread = 3012 kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement"); 3013 if (IS_ERR(con->page_retirement_thread)) { 3014 con->page_retirement_thread = NULL; 3015 dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n"); 3016 } 3017 3018 INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement); 3019 amdgpu_ras_ecc_log_init(&con->umc_ecc_log); 3020 #ifdef CONFIG_X86_MCE_AMD 3021 if ((adev->asic_type == CHIP_ALDEBARAN) && 3022 (adev->gmc.xgmi.connected_to_cpu)) 3023 amdgpu_register_bad_pages_mca_notifier(adev); 3024 #endif 3025 return 0; 3026 3027 free: 3028 kfree((*data)->bps); 3029 kfree(*data); 3030 con->eh_data = NULL; 3031 out: 3032 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret); 3033 3034 /* 3035 * Except error threshold exceeding case, other failure cases in this 3036 * function would not fail amdgpu driver init. 
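	 * (con->is_rma flags that error threshold exceeding case; only when
	 * it is set does the -EINVAL below propagate and fail driver init.)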
3037 */ 3038 if (!con->is_rma) 3039 ret = 0; 3040 else 3041 ret = -EINVAL; 3042 3043 return ret; 3044 } 3045 3046 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) 3047 { 3048 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3049 struct ras_err_handler_data *data = con->eh_data; 3050 3051 /* recovery_init failed to init it, fini is useless */ 3052 if (!data) 3053 return 0; 3054 3055 if (con->page_retirement_thread) 3056 kthread_stop(con->page_retirement_thread); 3057 3058 atomic_set(&con->page_retirement_req_cnt, 0); 3059 3060 mutex_destroy(&con->page_rsv_lock); 3061 3062 cancel_work_sync(&con->recovery_work); 3063 3064 cancel_delayed_work_sync(&con->page_retirement_dwork); 3065 3066 amdgpu_ras_ecc_log_fini(&con->umc_ecc_log); 3067 3068 mutex_lock(&con->recovery_lock); 3069 con->eh_data = NULL; 3070 kfree(data->bps); 3071 kfree(data); 3072 mutex_unlock(&con->recovery_lock); 3073 3074 return 0; 3075 } 3076 /* recovery end */ 3077 3078 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) 3079 { 3080 if (amdgpu_sriov_vf(adev)) { 3081 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 3082 case IP_VERSION(13, 0, 2): 3083 case IP_VERSION(13, 0, 6): 3084 case IP_VERSION(13, 0, 14): 3085 return true; 3086 default: 3087 return false; 3088 } 3089 } 3090 3091 if (adev->asic_type == CHIP_IP_DISCOVERY) { 3092 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 3093 case IP_VERSION(13, 0, 0): 3094 case IP_VERSION(13, 0, 6): 3095 case IP_VERSION(13, 0, 10): 3096 case IP_VERSION(13, 0, 14): 3097 return true; 3098 default: 3099 return false; 3100 } 3101 } 3102 3103 return adev->asic_type == CHIP_VEGA10 || 3104 adev->asic_type == CHIP_VEGA20 || 3105 adev->asic_type == CHIP_ARCTURUS || 3106 adev->asic_type == CHIP_ALDEBARAN || 3107 adev->asic_type == CHIP_SIENNA_CICHLID; 3108 } 3109 3110 /* 3111 * this is workaround for vega20 workstation sku, 3112 * force enable gfx ras, ignore vbios gfx ras flag 3113 * due to GC EDC can not write 3114 */ 3115 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev) 3116 { 3117 struct atom_context *ctx = adev->mode_info.atom_context; 3118 3119 if (!ctx) 3120 return; 3121 3122 if (strnstr(ctx->vbios_pn, "D16406", 3123 sizeof(ctx->vbios_pn)) || 3124 strnstr(ctx->vbios_pn, "D36002", 3125 sizeof(ctx->vbios_pn))) 3126 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX); 3127 } 3128 3129 /* Query ras capablity via atomfirmware interface */ 3130 static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev) 3131 { 3132 /* mem_ecc cap */ 3133 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { 3134 dev_info(adev->dev, "MEM ECC is active.\n"); 3135 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC | 3136 1 << AMDGPU_RAS_BLOCK__DF); 3137 } else { 3138 dev_info(adev->dev, "MEM ECC is not presented.\n"); 3139 } 3140 3141 /* sram_ecc cap */ 3142 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { 3143 dev_info(adev->dev, "SRAM ECC is active.\n"); 3144 if (!amdgpu_sriov_vf(adev)) 3145 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC | 3146 1 << AMDGPU_RAS_BLOCK__DF); 3147 else 3148 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF | 3149 1 << AMDGPU_RAS_BLOCK__SDMA | 3150 1 << AMDGPU_RAS_BLOCK__GFX); 3151 3152 /* 3153 * VCN/JPEG RAS can be supported on both bare metal and 3154 * SRIOV environment 3155 */ 3156 if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) || 3157 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) || 3158 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3)) 3159 
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN | 3160 1 << AMDGPU_RAS_BLOCK__JPEG); 3161 else 3162 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN | 3163 1 << AMDGPU_RAS_BLOCK__JPEG); 3164 3165 /* 3166 * XGMI RAS is not supported if xgmi num physical nodes 3167 * is zero 3168 */ 3169 if (!adev->gmc.xgmi.num_physical_nodes) 3170 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL); 3171 } else { 3172 dev_info(adev->dev, "SRAM ECC is not presented.\n"); 3173 } 3174 } 3175 3176 /* Query poison mode from umc/df IP callbacks */ 3177 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev) 3178 { 3179 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3180 bool df_poison, umc_poison; 3181 3182 /* poison setting is useless on SRIOV guest */ 3183 if (amdgpu_sriov_vf(adev) || !con) 3184 return; 3185 3186 /* Init poison supported flag, the default value is false */ 3187 if (adev->gmc.xgmi.connected_to_cpu || 3188 adev->gmc.is_app_apu) { 3189 /* enabled by default when GPU is connected to CPU */ 3190 con->poison_supported = true; 3191 } else if (adev->df.funcs && 3192 adev->df.funcs->query_ras_poison_mode && 3193 adev->umc.ras && 3194 adev->umc.ras->query_ras_poison_mode) { 3195 df_poison = 3196 adev->df.funcs->query_ras_poison_mode(adev); 3197 umc_poison = 3198 adev->umc.ras->query_ras_poison_mode(adev); 3199 3200 /* Only poison is set in both DF and UMC, we can support it */ 3201 if (df_poison && umc_poison) 3202 con->poison_supported = true; 3203 else if (df_poison != umc_poison) 3204 dev_warn(adev->dev, 3205 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n", 3206 df_poison, umc_poison); 3207 } 3208 } 3209 3210 /* 3211 * check hardware's ras ability which will be saved in hw_supported. 3212 * if hardware does not support ras, we can skip some ras initializtion and 3213 * forbid some ras operations from IP. 3214 * if software itself, say boot parameter, limit the ras ability. We still 3215 * need allow IP do some limited operations, like disable. In such case, 3216 * we have to initialize ras as normal. but need check if operation is 3217 * allowed or not in each function. 3218 */ 3219 static void amdgpu_ras_check_supported(struct amdgpu_device *adev) 3220 { 3221 adev->ras_hw_enabled = adev->ras_enabled = 0; 3222 3223 if (!amdgpu_ras_asic_supported(adev)) 3224 return; 3225 3226 /* query ras capability from psp */ 3227 if (amdgpu_psp_get_ras_capability(&adev->psp)) 3228 goto init_ras_enabled_flag; 3229 3230 /* query ras capablity from bios */ 3231 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { 3232 amdgpu_ras_query_ras_capablity_from_vbios(adev); 3233 } else { 3234 /* driver only manages a few IP blocks RAS feature 3235 * when GPU is connected cpu through XGMI */ 3236 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX | 3237 1 << AMDGPU_RAS_BLOCK__SDMA | 3238 1 << AMDGPU_RAS_BLOCK__MMHUB); 3239 } 3240 3241 /* apply asic specific settings (vega20 only for now) */ 3242 amdgpu_ras_get_quirks(adev); 3243 3244 /* query poison mode from umc/df ip callback */ 3245 amdgpu_ras_query_poison_mode(adev); 3246 3247 init_ras_enabled_flag: 3248 /* hw_supported needs to be aligned with RAS block mask. */ 3249 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK; 3250 3251 adev->ras_enabled = amdgpu_ras_enable == 0 ? 
0 : 3252 adev->ras_hw_enabled & amdgpu_ras_mask; 3253 3254 /* aca is disabled by default */ 3255 adev->aca.is_enabled = false; 3256 } 3257 3258 static void amdgpu_ras_counte_dw(struct work_struct *work) 3259 { 3260 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, 3261 ras_counte_delay_work.work); 3262 struct amdgpu_device *adev = con->adev; 3263 struct drm_device *dev = adev_to_drm(adev); 3264 unsigned long ce_count, ue_count; 3265 int res; 3266 3267 res = pm_runtime_get_sync(dev->dev); 3268 if (res < 0) 3269 goto Out; 3270 3271 /* Cache new values. 3272 */ 3273 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) { 3274 atomic_set(&con->ras_ce_count, ce_count); 3275 atomic_set(&con->ras_ue_count, ue_count); 3276 } 3277 3278 pm_runtime_mark_last_busy(dev->dev); 3279 Out: 3280 pm_runtime_put_autosuspend(dev->dev); 3281 } 3282 3283 static int amdgpu_get_ras_schema(struct amdgpu_device *adev) 3284 { 3285 return amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0 | 3286 AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE | 3287 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE | 3288 AMDGPU_RAS_ERROR__PARITY; 3289 } 3290 3291 static void ras_event_mgr_init(struct ras_event_manager *mgr) 3292 { 3293 int i; 3294 3295 for (i = 0; i < ARRAY_SIZE(mgr->seqnos); i++) 3296 atomic64_set(&mgr->seqnos[i], 0); 3297 } 3298 3299 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev) 3300 { 3301 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 3302 struct amdgpu_hive_info *hive; 3303 3304 if (!ras) 3305 return; 3306 3307 hive = amdgpu_get_xgmi_hive(adev); 3308 ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr; 3309 3310 /* init event manager with node 0 on xgmi system */ 3311 if (!amdgpu_in_reset(adev)) { 3312 if (!hive || adev->gmc.xgmi.node_id == 0) 3313 ras_event_mgr_init(ras->event_mgr); 3314 } 3315 3316 if (hive) 3317 amdgpu_put_xgmi_hive(hive); 3318 } 3319 3320 static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev) 3321 { 3322 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3323 3324 if (!con || (adev->flags & AMD_IS_APU)) 3325 return; 3326 3327 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 3328 case IP_VERSION(13, 0, 2): 3329 case IP_VERSION(13, 0, 6): 3330 case IP_VERSION(13, 0, 14): 3331 con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE; 3332 break; 3333 default: 3334 break; 3335 } 3336 } 3337 3338 int amdgpu_ras_init(struct amdgpu_device *adev) 3339 { 3340 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3341 int r; 3342 3343 if (con) 3344 return 0; 3345 3346 con = kzalloc(sizeof(*con) + 3347 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT + 3348 sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT, 3349 GFP_KERNEL); 3350 if (!con) 3351 return -ENOMEM; 3352 3353 con->adev = adev; 3354 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); 3355 atomic_set(&con->ras_ce_count, 0); 3356 atomic_set(&con->ras_ue_count, 0); 3357 3358 con->objs = (struct ras_manager *)(con + 1); 3359 3360 amdgpu_ras_set_context(adev, con); 3361 3362 amdgpu_ras_check_supported(adev); 3363 3364 if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) { 3365 /* set gfx block ras context feature for VEGA20 Gaming 3366 * send ras disable cmd to ras ta during ras late init. 
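		 * (amdgpu_ras_resume() later cleans this context up again via
		 * amdgpu_release_ras_context() once the disable cmd has been
		 * sent, keying off the GFX feature bit set below.)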
3367 */ 3368 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) { 3369 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); 3370 3371 return 0; 3372 } 3373 3374 r = 0; 3375 goto release_con; 3376 } 3377 3378 con->update_channel_flag = false; 3379 con->features = 0; 3380 con->schema = 0; 3381 INIT_LIST_HEAD(&con->head); 3382 /* Might need get this flag from vbios. */ 3383 con->flags = RAS_DEFAULT_FLAGS; 3384 3385 /* initialize nbio ras function ahead of any other 3386 * ras functions so hardware fatal error interrupt 3387 * can be enabled as early as possible */ 3388 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 3389 case IP_VERSION(7, 4, 0): 3390 case IP_VERSION(7, 4, 1): 3391 case IP_VERSION(7, 4, 4): 3392 if (!adev->gmc.xgmi.connected_to_cpu) 3393 adev->nbio.ras = &nbio_v7_4_ras; 3394 break; 3395 case IP_VERSION(4, 3, 0): 3396 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF)) 3397 /* unlike other generation of nbio ras, 3398 * nbio v4_3 only support fatal error interrupt 3399 * to inform software that DF is freezed due to 3400 * system fatal error event. driver should not 3401 * enable nbio ras in such case. Instead, 3402 * check DF RAS */ 3403 adev->nbio.ras = &nbio_v4_3_ras; 3404 break; 3405 case IP_VERSION(7, 9, 0): 3406 if (!adev->gmc.is_app_apu) 3407 adev->nbio.ras = &nbio_v7_9_ras; 3408 break; 3409 default: 3410 /* nbio ras is not available */ 3411 break; 3412 } 3413 3414 /* nbio ras block needs to be enabled ahead of other ras blocks 3415 * to handle fatal error */ 3416 r = amdgpu_nbio_ras_sw_init(adev); 3417 if (r) 3418 return r; 3419 3420 if (adev->nbio.ras && 3421 adev->nbio.ras->init_ras_controller_interrupt) { 3422 r = adev->nbio.ras->init_ras_controller_interrupt(adev); 3423 if (r) 3424 goto release_con; 3425 } 3426 3427 if (adev->nbio.ras && 3428 adev->nbio.ras->init_ras_err_event_athub_interrupt) { 3429 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev); 3430 if (r) 3431 goto release_con; 3432 } 3433 3434 /* Packed socket_id to ras feature mask bits[31:29] */ 3435 if (adev->smuio.funcs && 3436 adev->smuio.funcs->get_socket_id) 3437 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 3438 AMDGPU_RAS_FEATURES_SOCKETID_SHIFT); 3439 3440 /* Get RAS schema for particular SOC */ 3441 con->schema = amdgpu_get_ras_schema(adev); 3442 3443 amdgpu_ras_init_reserved_vram_size(adev); 3444 3445 if (amdgpu_ras_fs_init(adev)) { 3446 r = -EINVAL; 3447 goto release_con; 3448 } 3449 3450 if (amdgpu_ras_aca_is_supported(adev)) { 3451 if (amdgpu_aca_is_enabled(adev)) 3452 r = amdgpu_aca_init(adev); 3453 else 3454 r = amdgpu_mca_init(adev); 3455 if (r) 3456 goto release_con; 3457 } 3458 3459 dev_info(adev->dev, "RAS INFO: ras initialized successfully, " 3460 "hardware ability[%x] ras_mask[%x]\n", 3461 adev->ras_hw_enabled, adev->ras_enabled); 3462 3463 return 0; 3464 release_con: 3465 amdgpu_ras_set_context(adev, NULL); 3466 kfree(con); 3467 3468 return r; 3469 } 3470 3471 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev) 3472 { 3473 if (adev->gmc.xgmi.connected_to_cpu || 3474 adev->gmc.is_app_apu) 3475 return 1; 3476 return 0; 3477 } 3478 3479 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev, 3480 struct ras_common_if *ras_block) 3481 { 3482 struct ras_query_if info = { 3483 .head = *ras_block, 3484 }; 3485 3486 if (!amdgpu_persistent_edc_harvesting_supported(adev)) 3487 return 0; 3488 3489 if (amdgpu_ras_query_error_status(adev, &info) != 0) 3490 DRM_WARN("RAS init harvest failure"); 3491 3492 if 
(amdgpu_ras_reset_error_status(adev, ras_block->block) != 0) 3493 DRM_WARN("RAS init harvest reset failure"); 3494 3495 return 0; 3496 } 3497 3498 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev) 3499 { 3500 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3501 3502 if (!con) 3503 return false; 3504 3505 return con->poison_supported; 3506 } 3507 3508 /* helper function to handle common stuff in ip late init phase */ 3509 int amdgpu_ras_block_late_init(struct amdgpu_device *adev, 3510 struct ras_common_if *ras_block) 3511 { 3512 struct amdgpu_ras_block_object *ras_obj = NULL; 3513 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3514 struct ras_query_if *query_info; 3515 unsigned long ue_count, ce_count; 3516 int r; 3517 3518 /* disable RAS feature per IP block if it is not supported */ 3519 if (!amdgpu_ras_is_supported(adev, ras_block->block)) { 3520 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); 3521 return 0; 3522 } 3523 3524 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); 3525 if (r) { 3526 if (adev->in_suspend || amdgpu_in_reset(adev)) { 3527 /* in resume phase, if fail to enable ras, 3528 * clean up all ras fs nodes, and disable ras */ 3529 goto cleanup; 3530 } else 3531 return r; 3532 } 3533 3534 /* check for errors on warm reset edc persisant supported ASIC */ 3535 amdgpu_persistent_edc_harvesting(adev, ras_block); 3536 3537 /* in resume phase, no need to create ras fs node */ 3538 if (adev->in_suspend || amdgpu_in_reset(adev)) 3539 return 0; 3540 3541 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm); 3542 if (ras_obj->ras_cb || (ras_obj->hw_ops && 3543 (ras_obj->hw_ops->query_poison_status || 3544 ras_obj->hw_ops->handle_poison_consumption))) { 3545 r = amdgpu_ras_interrupt_add_handler(adev, ras_block); 3546 if (r) 3547 goto cleanup; 3548 } 3549 3550 if (ras_obj->hw_ops && 3551 (ras_obj->hw_ops->query_ras_error_count || 3552 ras_obj->hw_ops->query_ras_error_status)) { 3553 r = amdgpu_ras_sysfs_create(adev, ras_block); 3554 if (r) 3555 goto interrupt; 3556 3557 /* Those are the cached values at init. 
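		 * They seed con->ras_ce_count / con->ras_ue_count, the same
		 * counters that amdgpu_ras_counte_dw() re-caches whenever
		 * ras_counte_delay_work runs.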
3558 */ 3559 query_info = kzalloc(sizeof(*query_info), GFP_KERNEL); 3560 if (!query_info) 3561 return -ENOMEM; 3562 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if)); 3563 3564 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) { 3565 atomic_set(&con->ras_ce_count, ce_count); 3566 atomic_set(&con->ras_ue_count, ue_count); 3567 } 3568 3569 kfree(query_info); 3570 } 3571 3572 return 0; 3573 3574 interrupt: 3575 if (ras_obj->ras_cb) 3576 amdgpu_ras_interrupt_remove_handler(adev, ras_block); 3577 cleanup: 3578 amdgpu_ras_feature_enable(adev, ras_block, 0); 3579 return r; 3580 } 3581 3582 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev, 3583 struct ras_common_if *ras_block) 3584 { 3585 return amdgpu_ras_block_late_init(adev, ras_block); 3586 } 3587 3588 /* helper function to remove ras fs node and interrupt handler */ 3589 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev, 3590 struct ras_common_if *ras_block) 3591 { 3592 struct amdgpu_ras_block_object *ras_obj; 3593 if (!ras_block) 3594 return; 3595 3596 amdgpu_ras_sysfs_remove(adev, ras_block); 3597 3598 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm); 3599 if (ras_obj->ras_cb) 3600 amdgpu_ras_interrupt_remove_handler(adev, ras_block); 3601 } 3602 3603 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev, 3604 struct ras_common_if *ras_block) 3605 { 3606 return amdgpu_ras_block_late_fini(adev, ras_block); 3607 } 3608 3609 /* do some init work after IP late init as dependence. 3610 * and it runs in resume/gpu reset/booting up cases. 3611 */ 3612 void amdgpu_ras_resume(struct amdgpu_device *adev) 3613 { 3614 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3615 struct ras_manager *obj, *tmp; 3616 3617 if (!adev->ras_enabled || !con) { 3618 /* clean ras context for VEGA20 Gaming after send ras disable cmd */ 3619 amdgpu_release_ras_context(adev); 3620 3621 return; 3622 } 3623 3624 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { 3625 /* Set up all other IPs which are not implemented. There is a 3626 * tricky thing that IP's actual ras error type should be 3627 * MULTI_UNCORRECTABLE, but as driver does not handle it, so 3628 * ERROR_NONE make sense anyway. 3629 */ 3630 amdgpu_ras_enable_all_features(adev, 1); 3631 3632 /* We enable ras on all hw_supported block, but as boot 3633 * parameter might disable some of them and one or more IP has 3634 * not implemented yet. So we disable them on behalf. 3635 */ 3636 list_for_each_entry_safe(obj, tmp, &con->head, node) { 3637 if (!amdgpu_ras_is_supported(adev, obj->head.block)) { 3638 amdgpu_ras_feature_enable(adev, &obj->head, 0); 3639 /* there should be no any reference. */ 3640 WARN_ON(alive_obj(obj)); 3641 } 3642 } 3643 } 3644 } 3645 3646 void amdgpu_ras_suspend(struct amdgpu_device *adev) 3647 { 3648 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3649 3650 if (!adev->ras_enabled || !con) 3651 return; 3652 3653 amdgpu_ras_disable_all_features(adev, 0); 3654 /* Make sure all ras objects are disabled. 
*/ 3655 if (AMDGPU_RAS_GET_FEATURES(con->features)) 3656 amdgpu_ras_disable_all_features(adev, 1); 3657 } 3658 3659 int amdgpu_ras_late_init(struct amdgpu_device *adev) 3660 { 3661 struct amdgpu_ras_block_list *node, *tmp; 3662 struct amdgpu_ras_block_object *obj; 3663 int r; 3664 3665 amdgpu_ras_event_mgr_init(adev); 3666 3667 if (amdgpu_ras_aca_is_supported(adev)) { 3668 if (amdgpu_in_reset(adev)) { 3669 if (amdgpu_aca_is_enabled(adev)) 3670 r = amdgpu_aca_reset(adev); 3671 else 3672 r = amdgpu_mca_reset(adev); 3673 if (r) 3674 return r; 3675 } 3676 3677 if (!amdgpu_sriov_vf(adev)) { 3678 if (amdgpu_aca_is_enabled(adev)) 3679 amdgpu_ras_set_aca_debug_mode(adev, false); 3680 else 3681 amdgpu_ras_set_mca_debug_mode(adev, false); 3682 } 3683 } 3684 3685 /* Guest side doesn't need init ras feature */ 3686 if (amdgpu_sriov_vf(adev)) 3687 return 0; 3688 3689 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { 3690 obj = node->ras_obj; 3691 if (!obj) { 3692 dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); 3693 continue; 3694 } 3695 3696 if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block)) 3697 continue; 3698 3699 if (obj->ras_late_init) { 3700 r = obj->ras_late_init(adev, &obj->ras_comm); 3701 if (r) { 3702 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n", 3703 obj->ras_comm.name, r); 3704 return r; 3705 } 3706 } else 3707 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm); 3708 } 3709 3710 return 0; 3711 } 3712 3713 /* do some fini work before IP fini as dependence */ 3714 int amdgpu_ras_pre_fini(struct amdgpu_device *adev) 3715 { 3716 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3717 3718 if (!adev->ras_enabled || !con) 3719 return 0; 3720 3721 3722 /* Need disable ras on all IPs here before ip [hw/sw]fini */ 3723 if (AMDGPU_RAS_GET_FEATURES(con->features)) 3724 amdgpu_ras_disable_all_features(adev, 0); 3725 amdgpu_ras_recovery_fini(adev); 3726 return 0; 3727 } 3728 3729 int amdgpu_ras_fini(struct amdgpu_device *adev) 3730 { 3731 struct amdgpu_ras_block_list *ras_node, *tmp; 3732 struct amdgpu_ras_block_object *obj = NULL; 3733 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3734 3735 if (!adev->ras_enabled || !con) 3736 return 0; 3737 3738 list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) { 3739 if (ras_node->ras_obj) { 3740 obj = ras_node->ras_obj; 3741 if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) && 3742 obj->ras_fini) 3743 obj->ras_fini(adev, &obj->ras_comm); 3744 else 3745 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm); 3746 } 3747 3748 /* Clear ras blocks from ras_list and free ras block list node */ 3749 list_del(&ras_node->node); 3750 kfree(ras_node); 3751 } 3752 3753 amdgpu_ras_fs_fini(adev); 3754 amdgpu_ras_interrupt_remove_all(adev); 3755 3756 if (amdgpu_ras_aca_is_supported(adev)) { 3757 if (amdgpu_aca_is_enabled(adev)) 3758 amdgpu_aca_fini(adev); 3759 else 3760 amdgpu_mca_fini(adev); 3761 } 3762 3763 WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared"); 3764 3765 if (AMDGPU_RAS_GET_FEATURES(con->features)) 3766 amdgpu_ras_disable_all_features(adev, 0); 3767 3768 cancel_delayed_work_sync(&con->ras_counte_delay_work); 3769 3770 amdgpu_ras_set_context(adev, NULL); 3771 kfree(con); 3772 3773 return 0; 3774 } 3775 3776 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev) 3777 { 3778 struct amdgpu_ras *ras; 3779 3780 ras = amdgpu_ras_get_context(adev); 3781 if (!ras) 3782 return false; 3783 3784 return atomic_read(&ras->fed); 3785 } 3786 3787 void 
amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status) 3788 { 3789 struct amdgpu_ras *ras; 3790 3791 ras = amdgpu_ras_get_context(adev); 3792 if (ras) 3793 atomic_set(&ras->fed, !!status); 3794 } 3795 3796 bool amdgpu_ras_event_id_is_valid(struct amdgpu_device *adev, u64 id) 3797 { 3798 return !(id & BIT_ULL(63)); 3799 } 3800 3801 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type) 3802 { 3803 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 3804 u64 id; 3805 3806 switch (type) { 3807 case RAS_EVENT_TYPE_ISR: 3808 id = (u64)atomic64_read(&ras->event_mgr->seqnos[type]); 3809 break; 3810 case RAS_EVENT_TYPE_INVALID: 3811 default: 3812 id = BIT_ULL(63) | 0ULL; 3813 break; 3814 } 3815 3816 return id; 3817 } 3818 3819 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) 3820 { 3821 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { 3822 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 3823 u64 event_id = (u64)atomic64_inc_return(&ras->event_mgr->seqnos[RAS_EVENT_TYPE_ISR]); 3824 3825 RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error" 3826 "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); 3827 3828 amdgpu_ras_set_fed(adev, true); 3829 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; 3830 amdgpu_ras_reset_gpu(adev); 3831 } 3832 } 3833 3834 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev) 3835 { 3836 if (adev->asic_type == CHIP_VEGA20 && 3837 adev->pm.fw_version <= 0x283400) { 3838 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) && 3839 amdgpu_ras_intr_triggered(); 3840 } 3841 3842 return false; 3843 } 3844 3845 void amdgpu_release_ras_context(struct amdgpu_device *adev) 3846 { 3847 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 3848 3849 if (!con) 3850 return; 3851 3852 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { 3853 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); 3854 amdgpu_ras_set_context(adev, NULL); 3855 kfree(con); 3856 } 3857 } 3858 3859 #ifdef CONFIG_X86_MCE_AMD 3860 static struct amdgpu_device *find_adev(uint32_t node_id) 3861 { 3862 int i; 3863 struct amdgpu_device *adev = NULL; 3864 3865 for (i = 0; i < mce_adev_list.num_gpu; i++) { 3866 adev = mce_adev_list.devs[i]; 3867 3868 if (adev && adev->gmc.xgmi.connected_to_cpu && 3869 adev->gmc.xgmi.physical_node_id == node_id) 3870 break; 3871 adev = NULL; 3872 } 3873 3874 return adev; 3875 } 3876 3877 #define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF) 3878 #define GET_UMC_INST(m) (((m) >> 21) & 0x7) 3879 #define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4)) 3880 #define GPU_ID_OFFSET 8 3881 3882 static int amdgpu_bad_page_notifier(struct notifier_block *nb, 3883 unsigned long val, void *data) 3884 { 3885 struct mce *m = (struct mce *)data; 3886 struct amdgpu_device *adev = NULL; 3887 uint32_t gpu_id = 0; 3888 uint32_t umc_inst = 0, ch_inst = 0; 3889 3890 /* 3891 * If the error was generated in UMC_V2, which belongs to GPU UMCs, 3892 * and error occurred in DramECC (Extended error code = 0) then only 3893 * process the error, else bail out. 3894 */ 3895 if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) && 3896 (XEC(m->status, 0x3f) == 0x0))) 3897 return NOTIFY_DONE; 3898 3899 /* 3900 * If it is correctable error, return. 3901 */ 3902 if (mce_is_correctable(m)) 3903 return NOTIFY_OK; 3904 3905 /* 3906 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register. 
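	 * For example, if GET_MCA_IPID_GPUID() extracts 0x9 from MCA_IPID
	 * bits [47:44], the gpu_id computed below is 0x9 - 8 = 1.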
3907 */ 3908 gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET; 3909 3910 adev = find_adev(gpu_id); 3911 if (!adev) { 3912 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__, 3913 gpu_id); 3914 return NOTIFY_DONE; 3915 } 3916 3917 /* 3918 * If it is uncorrectable error, then find out UMC instance and 3919 * channel index. 3920 */ 3921 umc_inst = GET_UMC_INST(m->ipid); 3922 ch_inst = GET_CHAN_INDEX(m->ipid); 3923 3924 dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d", 3925 umc_inst, ch_inst); 3926 3927 if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst)) 3928 return NOTIFY_OK; 3929 else 3930 return NOTIFY_DONE; 3931 } 3932 3933 static struct notifier_block amdgpu_bad_page_nb = { 3934 .notifier_call = amdgpu_bad_page_notifier, 3935 .priority = MCE_PRIO_UC, 3936 }; 3937 3938 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev) 3939 { 3940 /* 3941 * Add the adev to the mce_adev_list. 3942 * During mode2 reset, amdgpu device is temporarily 3943 * removed from the mgpu_info list which can cause 3944 * page retirement to fail. 3945 * Use this list instead of mgpu_info to find the amdgpu 3946 * device on which the UMC error was reported. 3947 */ 3948 mce_adev_list.devs[mce_adev_list.num_gpu++] = adev; 3949 3950 /* 3951 * Register the x86 notifier only once 3952 * with MCE subsystem. 3953 */ 3954 if (notifier_registered == false) { 3955 mce_register_decode_chain(&amdgpu_bad_page_nb); 3956 notifier_registered = true; 3957 } 3958 } 3959 #endif 3960 3961 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev) 3962 { 3963 if (!adev) 3964 return NULL; 3965 3966 return adev->psp.ras_context.ras; 3967 } 3968 3969 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con) 3970 { 3971 if (!adev) 3972 return -EINVAL; 3973 3974 adev->psp.ras_context.ras = ras_con; 3975 return 0; 3976 } 3977 3978 /* check if ras is supported on block, say, sdma, gfx */ 3979 int amdgpu_ras_is_supported(struct amdgpu_device *adev, 3980 unsigned int block) 3981 { 3982 int ret = 0; 3983 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 3984 3985 if (block >= AMDGPU_RAS_BLOCK_COUNT) 3986 return 0; 3987 3988 ret = ras && (adev->ras_enabled & (1 << block)); 3989 3990 /* For the special asic with mem ecc enabled but sram ecc 3991 * not enabled, even if the ras block is not supported on 3992 * .ras_enabled, if the asic supports poison mode and the 3993 * ras block has ras configuration, it can be considered 3994 * that the ras block supports ras function. 
3995 */ 3996 if (!ret && 3997 (block == AMDGPU_RAS_BLOCK__GFX || 3998 block == AMDGPU_RAS_BLOCK__SDMA || 3999 block == AMDGPU_RAS_BLOCK__VCN || 4000 block == AMDGPU_RAS_BLOCK__JPEG) && 4001 (amdgpu_ras_mask & (1 << block)) && 4002 amdgpu_ras_is_poison_mode_supported(adev) && 4003 amdgpu_ras_get_ras_block(adev, block, 0)) 4004 ret = 1; 4005 4006 return ret; 4007 } 4008 4009 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) 4010 { 4011 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 4012 4013 if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) 4014 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work); 4015 return 0; 4016 } 4017 4018 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable) 4019 { 4020 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4021 int ret = 0; 4022 4023 if (con) { 4024 ret = amdgpu_mca_smu_set_debug_mode(adev, enable); 4025 if (!ret) 4026 con->is_aca_debug_mode = enable; 4027 } 4028 4029 return ret; 4030 } 4031 4032 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable) 4033 { 4034 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4035 int ret = 0; 4036 4037 if (con) { 4038 if (amdgpu_aca_is_enabled(adev)) 4039 ret = amdgpu_aca_smu_set_debug_mode(adev, enable); 4040 else 4041 ret = amdgpu_mca_smu_set_debug_mode(adev, enable); 4042 if (!ret) 4043 con->is_aca_debug_mode = enable; 4044 } 4045 4046 return ret; 4047 } 4048 4049 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev) 4050 { 4051 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4052 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs; 4053 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; 4054 4055 if (!con) 4056 return false; 4057 4058 if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) || 4059 (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode)) 4060 return con->is_aca_debug_mode; 4061 else 4062 return true; 4063 } 4064 4065 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, 4066 unsigned int *error_query_mode) 4067 { 4068 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4069 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; 4070 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs; 4071 4072 if (!con) { 4073 *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY; 4074 return false; 4075 } 4076 4077 if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) 4078 *error_query_mode = 4079 (con->is_aca_debug_mode) ? 
AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; 4080 else 4081 *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY; 4082 4083 return true; 4084 } 4085 4086 /* Register each ip ras block into amdgpu ras */ 4087 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, 4088 struct amdgpu_ras_block_object *ras_block_obj) 4089 { 4090 struct amdgpu_ras_block_list *ras_node; 4091 if (!adev || !ras_block_obj) 4092 return -EINVAL; 4093 4094 ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL); 4095 if (!ras_node) 4096 return -ENOMEM; 4097 4098 INIT_LIST_HEAD(&ras_node->node); 4099 ras_node->ras_obj = ras_block_obj; 4100 list_add_tail(&ras_node->node, &adev->ras_list); 4101 4102 return 0; 4103 } 4104 4105 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name) 4106 { 4107 if (!err_type_name) 4108 return; 4109 4110 switch (err_type) { 4111 case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE: 4112 sprintf(err_type_name, "correctable"); 4113 break; 4114 case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE: 4115 sprintf(err_type_name, "uncorrectable"); 4116 break; 4117 default: 4118 sprintf(err_type_name, "unknown"); 4119 break; 4120 } 4121 } 4122 4123 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev, 4124 const struct amdgpu_ras_err_status_reg_entry *reg_entry, 4125 uint32_t instance, 4126 uint32_t *memory_id) 4127 { 4128 uint32_t err_status_lo_data, err_status_lo_offset; 4129 4130 if (!reg_entry) 4131 return false; 4132 4133 err_status_lo_offset = 4134 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance, 4135 reg_entry->seg_lo, reg_entry->reg_lo); 4136 err_status_lo_data = RREG32(err_status_lo_offset); 4137 4138 if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) && 4139 !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG)) 4140 return false; 4141 4142 *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID); 4143 4144 return true; 4145 } 4146 4147 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev, 4148 const struct amdgpu_ras_err_status_reg_entry *reg_entry, 4149 uint32_t instance, 4150 unsigned long *err_cnt) 4151 { 4152 uint32_t err_status_hi_data, err_status_hi_offset; 4153 4154 if (!reg_entry) 4155 return false; 4156 4157 err_status_hi_offset = 4158 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance, 4159 reg_entry->seg_hi, reg_entry->reg_hi); 4160 err_status_hi_data = RREG32(err_status_hi_offset); 4161 4162 if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) && 4163 !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG)) 4164 /* keep the check here in case we need to refer to the result later */ 4165 dev_dbg(adev->dev, "Invalid err_info field\n"); 4166 4167 /* read err count */ 4168 *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT); 4169 4170 return true; 4171 } 4172
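/*
 * Example (illustrative sketch only, not part of the driver): how an IP block
 * might drive amdgpu_ras_inst_query_ras_error_count() below. The hwip value,
 * register segment/offset numbers and block name are placeholders invented for
 * this sketch; real IP code fills struct amdgpu_ras_err_status_reg_entry from
 * its own register headers. Only the fields this file dereferences (hwip,
 * seg_lo/reg_lo, seg_hi/reg_hi, flags, block_name) are shown.
 *
 *	static const struct amdgpu_ras_err_status_reg_entry my_ue_reg_list[] = {
 *		{ .hwip = GC_HWIP,			// placeholder hwip
 *		  .seg_lo = 0, .reg_lo = 0x1234,	// placeholder ERR_STATUS_LO
 *		  .seg_hi = 0, .reg_hi = 0x1235,	// placeholder ERR_STATUS_HI
 *		  .flags = AMDGPU_RAS_ERR_STATUS_VALID | AMDGPU_RAS_ERR_INFO_VALID,
 *		  .block_name = "MY_BLOCK" },
 *	};
 *
 *	static void my_ip_query_ue_count(struct amdgpu_device *adev,
 *					 uint32_t instance,
 *					 unsigned long *ue_count)
 *	{
 *		amdgpu_ras_inst_query_ras_error_count(adev,
 *			my_ue_reg_list, ARRAY_SIZE(my_ue_reg_list),
 *			NULL, 0,
 *			instance,
 *			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *			ue_count);
 *		amdgpu_ras_inst_reset_ras_error_count(adev,
 *			my_ue_reg_list, ARRAY_SIZE(my_ue_reg_list),
 *			instance);
 *	}
 *
 * Passing a NULL mem_list simply makes the helper log the raw memory_id
 * instead of a decoded memory block name.
 */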
4173 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev, 4174 const struct amdgpu_ras_err_status_reg_entry *reg_list, 4175 uint32_t reg_list_size, 4176 const struct amdgpu_ras_memory_id_entry *mem_list, 4177 uint32_t mem_list_size, 4178 uint32_t instance, 4179 uint32_t err_type, 4180 unsigned long *err_count) 4181 { 4182 uint32_t memory_id; 4183 unsigned long err_cnt; 4184 char err_type_name[16]; 4185 uint32_t i, j; 4186 4187 for (i = 0; i < reg_list_size; i++) { 4188 /* query memory_id from err_status_lo */ 4189 if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i], 4190 instance, &memory_id)) 4191 continue; 4192 4193 /* query err_cnt from err_status_hi */ 4194 if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i], 4195 instance, &err_cnt) || 4196 !err_cnt) 4197 continue; 4198 4199 *err_count += err_cnt; 4200 4201 /* log the errors */ 4202 amdgpu_ras_get_error_type_name(err_type, err_type_name); 4203 if (!mem_list) { 4204 /* memory_list is not supported */ 4205 dev_info(adev->dev, 4206 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n", 4207 err_cnt, err_type_name, 4208 reg_list[i].block_name, 4209 instance, memory_id); 4210 } else { 4211 for (j = 0; j < mem_list_size; j++) { 4212 if (memory_id == mem_list[j].memory_id) { 4213 dev_info(adev->dev, 4214 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n", 4215 err_cnt, err_type_name, 4216 reg_list[i].block_name, 4217 instance, mem_list[j].name); 4218 break; 4219 } 4220 } 4221 } 4222 } 4223 } 4224 4225 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev, 4226 const struct amdgpu_ras_err_status_reg_entry *reg_list, 4227 uint32_t reg_list_size, 4228 uint32_t instance) 4229 { 4230 uint32_t err_status_lo_offset, err_status_hi_offset; 4231 uint32_t i; 4232 4233 for (i = 0; i < reg_list_size; i++) { 4234 err_status_lo_offset = 4235 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance, 4236 reg_list[i].seg_lo, reg_list[i].reg_lo); 4237 err_status_hi_offset = 4238 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance, 4239 reg_list[i].seg_hi, reg_list[i].reg_hi); 4240 WREG32(err_status_lo_offset, 0); 4241 WREG32(err_status_hi_offset, 0); 4242 } 4243 } 4244 4245 int amdgpu_ras_error_data_init(struct ras_err_data *err_data) 4246 { 4247 memset(err_data, 0, sizeof(*err_data)); 4248 4249 INIT_LIST_HEAD(&err_data->err_node_list); 4250 4251 return 0; 4252 } 4253 4254 static void amdgpu_ras_error_node_release(struct ras_err_node *err_node) 4255 { 4256 if (!err_node) 4257 return; 4258 4259 list_del(&err_node->node); 4260 kvfree(err_node); 4261 } 4262 4263 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data) 4264 { 4265 struct ras_err_node *err_node, *tmp; 4266 4267 list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node) 4268 amdgpu_ras_error_node_release(err_node); 4269 } 4270 4271 static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data, 4272 struct amdgpu_smuio_mcm_config_info *mcm_info) 4273 { 4274 struct ras_err_node *err_node; 4275 struct amdgpu_smuio_mcm_config_info *ref_id; 4276 4277 if (!err_data || !mcm_info) 4278 return NULL; 4279 4280 for_each_ras_error(err_node, err_data) { 4281 ref_id = &err_node->err_info.mcm_info; 4282 4283 if (mcm_info->socket_id == ref_id->socket_id && 4284 mcm_info->die_id == ref_id->die_id) 4285 return err_node; 4286 } 4287 4288 return NULL; 4289 } 4290 4291 static struct ras_err_node *amdgpu_ras_error_node_new(void) 4292 { 4293 struct ras_err_node *err_node; 4294 4295 err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL); 4296 if (!err_node) 4297 return NULL; 4298 4299 INIT_LIST_HEAD(&err_node->node); 4300 4301 return err_node; 4302 } 4303 4304 static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b) 4305 { 4306 struct ras_err_node *nodea = container_of(a, struct ras_err_node, node); 4307 struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node); 4308 struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info; 4309 struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info; 4310 4311 if (unlikely(infoa->socket_id != infob->socket_id)) 4312 return infoa->socket_id - infob->socket_id; 4313
else 4314 return infoa->die_id - infob->die_id; 4315 4316 return 0; 4317 } 4318 4319 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data, 4320 struct amdgpu_smuio_mcm_config_info *mcm_info) 4321 { 4322 struct ras_err_node *err_node; 4323 4324 err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info); 4325 if (err_node) 4326 return &err_node->err_info; 4327 4328 err_node = amdgpu_ras_error_node_new(); 4329 if (!err_node) 4330 return NULL; 4331 4332 INIT_LIST_HEAD(&err_node->err_info.err_addr_list); 4333 4334 memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info)); 4335 4336 err_data->err_list_count++; 4337 list_add_tail(&err_node->node, &err_data->err_node_list); 4338 list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp); 4339 4340 return &err_node->err_info; 4341 } 4342 4343 void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr) 4344 { 4345 /* This function will be retired. */ 4346 return; 4347 } 4348 4349 void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr) 4350 { 4351 list_del(&mca_err_addr->node); 4352 kfree(mca_err_addr); 4353 } 4354 4355 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, 4356 struct amdgpu_smuio_mcm_config_info *mcm_info, 4357 struct ras_err_addr *err_addr, u64 count) 4358 { 4359 struct ras_err_info *err_info; 4360 4361 if (!err_data || !mcm_info) 4362 return -EINVAL; 4363 4364 if (!count) 4365 return 0; 4366 4367 err_info = amdgpu_ras_error_get_info(err_data, mcm_info); 4368 if (!err_info) 4369 return -EINVAL; 4370 4371 if (err_addr && err_addr->err_status) 4372 amdgpu_ras_add_mca_err_addr(err_info, err_addr); 4373 4374 err_info->ue_count += count; 4375 err_data->ue_count += count; 4376 4377 return 0; 4378 } 4379 4380 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, 4381 struct amdgpu_smuio_mcm_config_info *mcm_info, 4382 struct ras_err_addr *err_addr, u64 count) 4383 { 4384 struct ras_err_info *err_info; 4385 4386 if (!err_data || !mcm_info) 4387 return -EINVAL; 4388 4389 if (!count) 4390 return 0; 4391 4392 err_info = amdgpu_ras_error_get_info(err_data, mcm_info); 4393 if (!err_info) 4394 return -EINVAL; 4395 4396 err_info->ce_count += count; 4397 err_data->ce_count += count; 4398 4399 return 0; 4400 } 4401 4402 int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data, 4403 struct amdgpu_smuio_mcm_config_info *mcm_info, 4404 struct ras_err_addr *err_addr, u64 count) 4405 { 4406 struct ras_err_info *err_info; 4407 4408 if (!err_data || !mcm_info) 4409 return -EINVAL; 4410 4411 if (!count) 4412 return 0; 4413 4414 err_info = amdgpu_ras_error_get_info(err_data, mcm_info); 4415 if (!err_info) 4416 return -EINVAL; 4417 4418 if (err_addr && err_addr->err_status) 4419 amdgpu_ras_add_mca_err_addr(err_info, err_addr); 4420 4421 err_info->de_count += count; 4422 err_data->de_count += count; 4423 4424 return 0; 4425 } 4426 4427 #define mmMP0_SMN_C2PMSG_92 0x1609C 4428 #define mmMP0_SMN_C2PMSG_126 0x160BE 4429 static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev, 4430 u32 instance) 4431 { 4432 u32 socket_id, aid_id, hbm_id; 4433 u32 fw_status; 4434 u32 boot_error; 4435 u64 reg_addr; 4436 4437 /* The pattern for smn addressing in other SOC could be different from 4438 * the one for aqua_vanjaram. We should revisit the code if the pattern 4439 * is changed. 
In such case, replace the aqua_vanjaram implementation 4440 * with more common helper */ 4441 reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) + 4442 aqua_vanjaram_encode_ext_smn_addressing(instance); 4443 fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr); 4444 4445 reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) + 4446 aqua_vanjaram_encode_ext_smn_addressing(instance); 4447 boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr); 4448 4449 socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error); 4450 aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error); 4451 hbm_id = AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error); 4452 4453 if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error)) 4454 dev_info(adev->dev, 4455 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n", 4456 socket_id, aid_id, hbm_id, fw_status); 4457 4458 if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error)) 4459 dev_info(adev->dev, 4460 "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n", 4461 socket_id, aid_id, fw_status); 4462 4463 if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error)) 4464 dev_info(adev->dev, 4465 "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n", 4466 socket_id, aid_id, fw_status); 4467 4468 if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error)) 4469 dev_info(adev->dev, 4470 "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n", 4471 socket_id, aid_id, fw_status); 4472 4473 if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error)) 4474 dev_info(adev->dev, 4475 "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n", 4476 socket_id, aid_id, fw_status); 4477 4478 if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error)) 4479 dev_info(adev->dev, 4480 "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n", 4481 socket_id, aid_id, fw_status); 4482 4483 if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error)) 4484 dev_info(adev->dev, 4485 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n", 4486 socket_id, aid_id, hbm_id, fw_status); 4487 4488 if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error)) 4489 dev_info(adev->dev, 4490 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n", 4491 socket_id, aid_id, hbm_id, fw_status); 4492 } 4493 4494 static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev, 4495 u32 instance) 4496 { 4497 u64 reg_addr; 4498 u32 reg_data; 4499 int retry_loop; 4500 4501 reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) + 4502 aqua_vanjaram_encode_ext_smn_addressing(instance); 4503 4504 for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) { 4505 reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr); 4506 if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS) 4507 return false; 4508 else 4509 msleep(1); 4510 } 4511 4512 return true; 4513 } 4514 4515 void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances) 4516 { 4517 u32 i; 4518 4519 for (i = 0; i < num_instances; i++) { 4520 if (amdgpu_ras_boot_error_detected(adev, i)) 4521 amdgpu_ras_boot_time_error_reporting(adev, i); 4522 } 4523 } 4524 4525 int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn) 4526 { 4527 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 4528 struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; 4529 uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT; 4530 int ret = 0; 4531 4532 mutex_lock(&con->page_rsv_lock); 4533 ret = amdgpu_vram_mgr_query_page_status(mgr, start); 4534 if (ret == -ENOENT) 4535 ret = 
amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE); 4536 mutex_unlock(&con->page_rsv_lock); 4537 4538 return ret; 4539 } 4540 4541 void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id, 4542 const char *fmt, ...) 4543 { 4544 struct va_format vaf; 4545 va_list args; 4546 4547 va_start(args, fmt); 4548 vaf.fmt = fmt; 4549 vaf.va = &args; 4550 4551 if (amdgpu_ras_event_id_is_valid(adev, event_id)) 4552 dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf); 4553 else 4554 dev_printk(KERN_INFO, adev->dev, "%pV", &vaf); 4555 4556 va_end(args); 4557 } 4558
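/*
 * Example (illustrative sketch only, not part of the driver): the typical
 * life cycle of a struct ras_err_data when a caller aggregates per-(socket,
 * die) error counts with the statistic helpers above. The socket/die IDs and
 * the counts are placeholders; real callers fill
 * struct amdgpu_smuio_mcm_config_info from whatever bank or register data
 * they decode.
 *
 *	struct ras_err_data err_data;
 *	struct amdgpu_smuio_mcm_config_info mcm_info = {
 *		.socket_id = 0,
 *		.die_id = 0,
 *	};
 *
 *	if (amdgpu_ras_error_data_init(&err_data))
 *		return;
 *
 *	amdgpu_ras_error_statistic_ue_count(&err_data, &mcm_info, NULL, 1);
 *	amdgpu_ras_error_statistic_ce_count(&err_data, &mcm_info, NULL, 2);
 *
 *	amdgpu_ras_error_data_fini(&err_data);
 *
 * Each distinct (socket_id, die_id) pair gets one ras_err_node, created on
 * demand by amdgpu_ras_error_get_info() and kept sorted by ras_err_info_cmp().
 * After the two calls above, err_data.ue_count is 1 and err_data.ce_count is
 * 2, with the same counts recorded in the matching per-die ras_err_info.
 */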