/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/aperture.h>
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>
#include <linux/nospec.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_ras_mgr.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
#include "amdgpu_virt.h"
#include "amdgpu_dev_coredump.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#endif

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
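/*
 * Fallback dword offsets (byte offset >> 2) for the indirect PCIE
 * index/data register pair; judging by the names, these are presumably
 * used when the ASIC-specific offsets are not available.
 */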
#define AMDGPU_PCIE_INDEX_FALLBACK	(0x38 >> 2)
#define AMDGPU_PCIE_INDEX_HI_FALLBACK	(0x44 >> 2)
#define AMDGPU_PCIE_DATA_FALLBACK	(0x3C >> 2)

#define AMDGPU_VBIOS_SKIP	(1U << 0)
#define AMDGPU_VBIOS_OPTIONAL	(1U << 1)

static const struct drm_driver amdgpu_kms_driver;

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"CYAN_SKILLFISH",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"BEIGE_GOBY",
	"YELLOW_CARP",
	"IP DISCOVERY",
	"LAST",
};

#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
/*
 * Default init level where all blocks are expected to be initialized. This is
 * the level of initialization expected by default and also after a full reset
 * of the device.
 */
struct amdgpu_init_level amdgpu_init_default = {
	.level = AMDGPU_INIT_LEVEL_DEFAULT,
	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
};

struct amdgpu_init_level amdgpu_init_recovery = {
	.level = AMDGPU_INIT_LEVEL_RESET_RECOVERY,
	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
};

/*
 * Minimal blocks needed to be initialized before an XGMI hive can be reset.
 * This is used for cases like reset on initialization where the entire hive
 * needs to be reset before first use.
 */
struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
	.level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
	.hwini_ip_block_mask =
		BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
		BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
		BIT(AMD_IP_BLOCK_TYPE_PSP)
};

static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev);
static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev);
static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev);

static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);

static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
					     enum amd_ip_block_type block)
{
	return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
}

void amdgpu_set_init_level(struct amdgpu_device *adev,
			   enum amdgpu_init_lvl_id lvl)
{
	switch (lvl) {
	case AMDGPU_INIT_LEVEL_MINIMAL_XGMI:
		adev->init_lvl = &amdgpu_init_minimal_xgmi;
		break;
	case AMDGPU_INIT_LEVEL_RESET_RECOVERY:
		adev->init_lvl = &amdgpu_init_recovery;
		break;
	case AMDGPU_INIT_LEVEL_DEFAULT:
		fallthrough;
	default:
		adev->init_lvl = &amdgpu_init_default;
		break;
	}
}

static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
				     void *data);

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
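 *
 * Illustrative usage (the exact sysfs path depends on the card's PCI
 * address)::
 *
 *   $ cat /sys/bus/pci/devices/0000:03:00.0/pcie_replay_count
 *   0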
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, 0444,
		   amdgpu_device_get_pcie_replay_count, NULL);

static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
{
	int ret = 0;

	if (amdgpu_nbio_is_replay_cnt_supported(adev))
		ret = sysfs_create_file(&adev->dev->kobj,
					&dev_attr_pcie_replay_count.attr);

	return ret;
}

static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
{
	if (amdgpu_nbio_is_replay_cnt_supported(adev))
		sysfs_remove_file(&adev->dev->kobj,
				  &dev_attr_pcie_replay_count.attr);
}

static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
					  const struct bin_attribute *attr,
					  char *buf, loff_t ppos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t bytes_read;

	switch (ppos) {
	case AMDGPU_SYS_REG_STATE_XGMI:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_WAFL:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_PCIE:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR_1:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
		break;
	default:
		return -EINVAL;
	}

	return bytes_read;
}

static const BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
		      AMDGPU_SYS_REG_STATE_END);

int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (!amdgpu_asic_get_reg_state_supported(adev))
		return 0;

	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);

	return ret;
}

void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_asic_get_reg_state_supported(adev))
		return;
	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
}
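/*
 * Note: reg_state is a binary sysfs attribute; the read offset (ppos)
 * selects which register-state type is returned (XGMI, WAFL, PCIE, USR or
 * USR_1), as handled in amdgpu_sysfs_reg_state_get() above.
 */
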
/**
 * DOC: board_info
 *
 * The amdgpu driver provides a sysfs API for reporting board related
 * information. It provides the form factor information in the format
 *
 *   type : form factor
 *
 * Possible form factor values
 *
 * - "cem"     - PCIE CEM card
 * - "oam"     - Open Compute Accelerator Module
 * - "unknown" - Not known
 *
 */

static ssize_t amdgpu_device_get_board_info(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
	const char *pkg;

	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
		pkg_type = adev->smuio.funcs->get_pkg_type(adev);

	switch (pkg_type) {
	case AMDGPU_PKG_TYPE_CEM:
		pkg = "cem";
		break;
	case AMDGPU_PKG_TYPE_OAM:
		pkg = "oam";
		break;
	default:
		pkg = "unknown";
		break;
	}

	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
}

static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);

static struct attribute *amdgpu_board_attrs[] = {
	&dev_attr_board_info.attr,
	NULL,
};

static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
					     struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (adev->flags & AMD_IS_APU)
		return 0;

	return attr->mode;
}

static const struct attribute_group amdgpu_board_attrs_group = {
	.attrs = amdgpu_board_attrs,
	.is_visible = amdgpu_board_attrs_is_visible
};

/**
 * DOC: uma/carveout_options
 *
 * This is a read-only file that lists all available UMA allocation
 * options and their corresponding indices. Example output::
 *
 *   $ cat uma/carveout_options
 *   0: Minimum (512 MB)
 *   1:  (1 GB)
 *   2:  (2 GB)
 *   3:  (4 GB)
 *   4:  (6 GB)
 *   5:  (8 GB)
 *   6:  (12 GB)
 *   7: Medium (16 GB)
 *   8:  (24 GB)
 *   9: High (32 GB)
 */
static ssize_t carveout_options_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;
	uint32_t memory_carved;
	ssize_t size = 0;

	if (!uma_info || !uma_info->num_entries)
		return -ENODEV;

	for (int i = 0; i < uma_info->num_entries; i++) {
		memory_carved = uma_info->entries[i].memory_carved_mb;
		if (memory_carved >= SZ_1G / SZ_1M) {
			size += sysfs_emit_at(buf, size, "%d: %s (%u GB)\n",
					      i,
					      uma_info->entries[i].name,
					      memory_carved >> 10);
		} else {
			size += sysfs_emit_at(buf, size, "%d: %s (%u MB)\n",
					      i,
					      uma_info->entries[i].name,
					      memory_carved);
		}
	}

	return size;
}
static DEVICE_ATTR_RO(carveout_options);

/**
 * DOC: uma/carveout
 *
 * This file is both readable and writable. When read, it shows the
 * index of the current setting. Writing a valid index to this file
 * allows users to change the UMA carveout size to the selected option
 * on the next boot.
 *
 * The available options and their corresponding indices can be read
 * from the uma/carveout_options file.
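 *
 * Illustrative usage (valid indices come from uma/carveout_options)::
 *
 *   $ cat uma/carveout
 *   0
 *   $ echo 7 > uma/carveout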
 */
static ssize_t carveout_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%u\n", adev->uma_info.uma_option_index);
}

static ssize_t carveout_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;
	struct amdgpu_uma_carveout_option *opt;
	unsigned long val;
	uint8_t flags;
	int r;

	r = kstrtoul(buf, 10, &val);
	if (r)
		return r;

	if (val >= uma_info->num_entries)
		return -EINVAL;

	val = array_index_nospec(val, uma_info->num_entries);
	opt = &uma_info->entries[val];

	if (!(opt->flags & AMDGPU_UMA_FLAG_AUTO) &&
	    !(opt->flags & AMDGPU_UMA_FLAG_CUSTOM)) {
		drm_err_once(ddev, "Option %lu not supported due to lack of Custom/Auto flag", val);
		return -EINVAL;
	}

	flags = opt->flags;
	flags &= ~((flags & AMDGPU_UMA_FLAG_AUTO) >> 1);

	guard(mutex)(&uma_info->update_lock);

	r = amdgpu_acpi_set_uma_allocation_size(adev, val, flags);
	if (r)
		return r;

	uma_info->uma_option_index = val;

	return count;
}
static DEVICE_ATTR_RW(carveout);

static struct attribute *amdgpu_uma_attrs[] = {
	&dev_attr_carveout.attr,
	&dev_attr_carveout_options.attr,
	NULL
};

const struct attribute_group amdgpu_uma_attr_group = {
	.name = "uma",
	.attrs = amdgpu_uma_attrs
};

static void amdgpu_uma_sysfs_init(struct amdgpu_device *adev)
{
	int rc;

	if (!(adev->flags & AMD_IS_APU))
		return;

	if (!amdgpu_acpi_is_set_uma_allocation_size_supported())
		return;

	rc = amdgpu_atomfirmware_get_uma_carveout_info(adev, &adev->uma_info);
	if (rc) {
		drm_dbg(adev_to_drm(adev),
			"Failed to parse UMA carveout info from VBIOS: %d\n", rc);
		goto out_info;
	}

	mutex_init(&adev->uma_info.update_lock);

	rc = devm_device_add_group(adev->dev, &amdgpu_uma_attr_group);
	if (rc) {
		drm_dbg(adev_to_drm(adev), "Failed to add UMA carveout sysfs interfaces %d\n", rc);
		goto out_attr;
	}

	return;

out_attr:
	mutex_destroy(&adev->uma_info.update_lock);
out_info:
	return;
}

static void amdgpu_uma_sysfs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;

	if (!amdgpu_acpi_is_set_uma_allocation_size_supported())
		return;

	mutex_destroy(&uma_info->update_lock);
	uma_info->num_entries = 0;
}

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @adev: amdgpu device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_px(struct amdgpu_device *adev)
{
	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}
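/*
 * Terminology for the power-control helpers that follow: PX powers the
 * dGPU up and down via ATPX ACPI methods, BOCO ("bus off, chip off") uses
 * standard ACPI power resources such as PR3, and BACO ("bus active, chip
 * off") powers the chip down while the PCI bus stays active.
 */
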
/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @adev: amdgpu device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		return false;

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @adev: amdgpu device pointer
 *
 * Return:
 * 1 if the device supports BACO;
 * 3 if the device supports MACO (only works if BACO is supported);
 * otherwise returns 0.
 */
int amdgpu_device_supports_baco(struct amdgpu_device *adev)
{
	return amdgpu_asic_supports_baco(adev);
}

void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
{
	int bamaco_support;

	adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
	bamaco_support = amdgpu_device_supports_baco(adev);

	switch (amdgpu_runtime_pm) {
	case 2:
		if (bamaco_support & MACO_SUPPORT) {
			adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
			dev_info(adev->dev, "Forcing BAMACO for runtime pm\n");
		} else if (bamaco_support == BACO_SUPPORT) {
			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			dev_info(adev->dev, "Requested mode BAMACO not available, falling back to BACO\n");
		}
		break;
	case 1:
		if (bamaco_support & BACO_SUPPORT) {
			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			dev_info(adev->dev, "Forcing BACO for runtime pm\n");
		}
		break;
	case -1:
	case -2:
		if (amdgpu_device_supports_px(adev)) {
			/* enable PX as runtime mode */
			adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
			dev_info(adev->dev, "Using ATPX for runtime pm\n");
		} else if (amdgpu_device_supports_boco(adev)) {
			/* enable boco as runtime mode */
			adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
			dev_info(adev->dev, "Using BOCO for runtime pm\n");
		} else {
			if (!bamaco_support)
				goto no_runtime_pm;

			switch (adev->asic_type) {
			case CHIP_VEGA20:
			case CHIP_ARCTURUS:
				/* BACO is not supported on vega20 and arcturus */
				break;
			case CHIP_VEGA10:
				/* enable BACO as runpm mode if noretry=0 */
				if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
					adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
				break;
			default:
				/* enable BACO as runpm mode on CI+ */
				if (!amdgpu_passthrough(adev))
					adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
				break;
			}

			if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
				if (bamaco_support & MACO_SUPPORT) {
					adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
					dev_info(adev->dev, "Using BAMACO for runtime pm\n");
				} else {
					dev_info(adev->dev, "Using BACO for runtime pm\n");
				}
			}
		}
		break;
	case 0:
		dev_info(adev->dev, "runtime pm is manually disabled\n");
		break;
	default:
		break;
	}

no_runtime_pm:
	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
		dev_info(adev->dev, "Runtime PM not available\n");
}
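/*
 * Summary of the amdgpu_runtime_pm module parameter as handled above:
 *   2: force BAMACO (falls back to BACO when MACO is unsupported)
 *   1: force BACO
 *   0: runtime pm manually disabled
 *  -1/-2: automatic selection (PX, then BOCO, then BACO/BAMACO)
 */
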
/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * smart shift support
 *
 * @adev: amdgpu device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
{
	return (amdgpu_device_supports_boco(adev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		/* bit 31 of MM_INDEX selects the VRAM aperture */
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}

/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * The return value means how many bytes have been transferred.
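 * A return value of 0 means the aperture could not be used (no CPU mapping
 * of visible VRAM, a 32-bit kernel, or @pos beyond visible VRAM).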
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			/* Make sure HDP write cache flush happens without any reordering
			 * after the system memory contents are sent over PCIe device
			 */
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			/* Make sure HDP read cache is invalidated before issuing a read
			 * to the PCIe device
			 */
			mb();
			memcpy_fromio(buf, addr, count);
		}

	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try using the VRAM aperture to access VRAM first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM_INDEX/MM_DATA to access the rest of VRAM */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}

/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_domain->sem))
			up_read(&adev->reset_domain->sem);
		else
			lockdep_assert_held(&adev->reset_domain->sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_get_rev_id - query device rev_id
 *
 * @adev: amdgpu_device pointer
 *
 * Return device rev_id
 */
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static uint32_t amdgpu_device_get_vbios_flags(struct amdgpu_device *adev)
{
	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
		return AMDGPU_VBIOS_SKIP;

	if (hweight32(adev->aid_mask) && amdgpu_passthrough(adev))
		return AMDGPU_VBIOS_OPTIONAL;

	return 0;
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
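 * Returns 0 on success, negative error code on failure.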
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	uint32_t flags;
	bool optional;
	int ret;

	amdgpu_asic_pre_asic_init(adev);
	flags = amdgpu_device_get_vbios_flags(adev);
	optional = !!(flags & (AMDGPU_VBIOS_OPTIONAL | AMDGPU_VBIOS_SKIP));

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		amdgpu_psp_wait_for_bootloader(adev);
		if (optional && !adev->bios)
			return 0;

		ret = amdgpu_atomfirmware_asic_init(adev, true);
		return ret;
	} else {
		if (optional && !adev->bios)
			return 0;

		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
	}

	return 0;
}

/**
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &adev->mem_scratch.robj,
				       &adev->mem_scratch.gpu_addr,
				       (void **)&adev->mem_scratch.ptr);
}

/**
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with and/or masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
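 * Returns 0 on success, negative error code on failure.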
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long flags, offset;

	spin_lock_irqsave(&adev->wb.lock, flags);
	offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		spin_unlock_irqrestore(&adev->wb.lock, flags);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		spin_unlock_irqrestore(&adev->wb.lock, flags);
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	unsigned long flags;

	wb >>= 3;
	spin_lock_irqsave(&adev->wb.lock, flags);
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
	spin_unlock_irqrestore(&adev->wb.lock, flags);
}

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	int max_size, r;
	unsigned int i;
	u16 cmd;

	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		return 0;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!amdgpu_rebar)
		return 0;

	/* resizing on Dell G5 SE platforms causes problems with runtime pm */
	if ((amdgpu_runtime_pm != 0) &&
	    adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
	    adev->pdev->device == 0x731f &&
	    adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
		return 0;

	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
		dev_warn(
			adev->dev,
			"System can't access extended configuration space, please check!!\n");

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	max_size = pci_rebar_get_max_size(adev->pdev, 0);
	if (max_size < 0)
		return 0;
	rbar_size = min(max_size, rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Tear down doorbell as resizing will release BARs */
	amdgpu_doorbell_fini(adev);

	r = pci_resize_resource(adev->pdev, 0, rbar_size,
				(adev->asic_type >= CHIP_BONAIRE) ? 1 << 5
								  : 1 << 2);
	if (r == -ENOSPC)
		dev_info(adev->dev,
			 "Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if post is needed because a hw reset was performed.
 * Returns true if post is needed or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg, flags;

	if (amdgpu_sriov_vf(adev))
		return false;

	flags = amdgpu_device_get_vbios_flags(adev);
	if (flags & AMDGPU_VBIOS_SKIP)
		return false;
	if ((flags & AMDGPU_VBIOS_OPTIONAL) && !adev->bios)
		return false;

	if (amdgpu_passthrough(adev)) {
		/* For FIJI: in the whole-GPU pass-through virtualization case, after
		 * a VM reboot some old SMC firmware still needs the driver to do a
		 * vPost, otherwise the GPU hangs. SMC firmware versions above 22.15
		 * don't have this flaw, so we force vPost for versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			release_firmware(adev->pm.fw);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on DCE 3.0 or later.
 * If users report that it works on older ASICs as well, we may
 * loosen this.
 */
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
{
	switch (amdgpu_seamless) {
	case -1:
		break;
	case 1:
		return true;
	case 0:
		return false;
	default:
		dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
			amdgpu_seamless);
		return false;
	}

	if (!(adev->flags & AMD_IS_APU))
		return false;

	if (adev->mman.keep_stolen_vga_memory)
		return false;

	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
}

/*
 * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
 * don't support dynamic speed switching. Until we have confirmation from Intel
 * that a specific host supports it, it's safer that we keep it disabled for all.
 *
 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
 */
static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* eGPUs change speeds based on USB4 fabric conditions */
	if (dev_is_removable(adev->dev))
		return true;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		return false;
#endif
	return true;
}

static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
{
	/* Enabling ASPM causes random hangs on Tahiti and Oland on Zen4.
	 * It's unclear if this is a platform-specific or GPU-specific issue.
	 * Disable ASPM on SI for the time being.
	 */
	if (adev->family == AMDGPU_FAMILY_SI)
		return true;

#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
	      amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
		return false;

	if (c->x86 == 6 &&
	    adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
		switch (c->x86_model) {
		case VFM_MODEL(INTEL_ALDERLAKE):
		case VFM_MODEL(INTEL_ALDERLAKE_L):
		case VFM_MODEL(INTEL_RAPTORLAKE):
		case VFM_MODEL(INTEL_RAPTORLAKE_P):
		case VFM_MODEL(INTEL_RAPTORLAKE_S):
			return true;
		default:
			return false;
		}
	} else {
		return false;
	}
#else
	return false;
#endif
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
	switch (amdgpu_aspm) {
	case -1:
		break;
	case 0:
		return false;
	case 1:
		return true;
	default:
		return false;
	}
	if (adev->flags & AMD_IS_APU)
		return false;
	if (amdgpu_device_aspm_support_quirk(adev))
		return false;
	return pcie_aspm_enabled(adev->pdev);
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
						 bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory; a page is 4KB so we have 12 bits of offset, a minimum of 9
 * bits in the page table, and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory
	 */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		dev_warn(adev->dev, "Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	dev_warn(adev->dev, "Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return 0;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if ((adev->pdev->device == 0x13FE) ||
		    (adev->pdev->device == 0x143F))
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		break;
	}

	return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	int i;

	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
		amdgpu_reset_method = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	for (i = 0; i < MAX_XCP; i++) {
		switch (amdgpu_enforce_isolation) {
		case -1:
		case 0:
		default:
			/* disable */
			adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
			break;
		case 1:
			/* enable */
			adev->enforce_isolation[i] =
				AMDGPU_ENFORCE_ISOLATION_ENABLE;
			break;
		case 2:
			/* enable legacy mode */
			adev->enforce_isolation[i] =
				AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
			break;
		case 3:
			/* enable only process isolation without submitting cleaner shader */
			adev->enforce_isolation[i] =
				AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
			break;
		}
	}

	return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asic before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
	    state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
				 r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		dev_info(&pdev->dev, "switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_prepare(dev);
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
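 *
 * Illustrative format (the PCI address and CRTC count are examples)::
 *
 *   amdgpu.virtual_display=0000:01:00.0,2
 *
 * This enables two virtual CRTCs on the device at that PCI address; "all"
 * matches every device, and the CRTC count is clamped to the range 1-6.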
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		const char *pci_address_name = pci_name(adev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		dev_info(
			adev->dev,
			"virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			amdgpu_virtual_display, pci_address_name,
			adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
		adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
		dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
			 adev->enable_virtual_display,
			 adev->mode_info.num_crtc);
	}
}

/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			chip_name = "raven2";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_NAVI12:
		if (adev->discovery.bin)
			return 0;
		chip_name = "navi12";
		break;
	case CHIP_CYAN_SKILLFISH:
		if (adev->discovery.bin)
			return 0;
		chip_name = "cyan_skillfish";
		break;
	}

	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
				   AMDGPU_UCODE_OPTIONAL,
				   "amdgpu/%s_gpu_info.bin", chip_name);
	if (err) {
		dev_err(adev->dev,
			"Failed to get gpu_info firmware \"%s_gpu_info.bin\"\n",
			chip_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		/*
		 * Should be dropped when DAL no longer needs it.
		 */
		if (adev->asic_type == CHIP_NAVI12)
			goto parse_soc_bounding_box;

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		if (hdr->version_minor >= 1) {
			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->gfx.config.num_sc_per_sh =
				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
			adev->gfx.config.num_packer_per_sc =
				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
		}

parse_soc_bounding_box:
		/*
		 * soc bounding box info is not integrated in the discovery table;
		 * we always need to parse it from gpu info firmware if needed.
		 */
		if (hdr->version_minor == 2) {
			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
		}
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

static void amdgpu_uid_init(struct amdgpu_device *adev)
{
	/* Initialize the UID for the device */
	adev->uid_info = kzalloc_obj(struct amdgpu_uid);
	if (!adev->uid_info) {
		dev_warn(adev->dev, "Failed to allocate memory for UID\n");
		return;
	}
	adev->uid_info->adev = adev;
}

static void amdgpu_uid_fini(struct amdgpu_device *adev)
{
	/* Free the UID memory */
	kfree(adev->uid_info);
	adev->uid_info = NULL;
}

/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run. This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	struct amdgpu_ip_block *ip_block;
	struct pci_dev *parent;
	bool total, skip_bios;
	uint32_t bios_flags;
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;

		r = amdgpu_virt_init_critical_region(adev);
		if (r)
			return r;
	}

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_KV;
		else
			adev->family = AMDGPU_FAMILY_CI;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		r = amdgpu_discovery_set_ip_blocks(adev);
		if (r) {
			adev->num_ip_blocks = 0;
			return r;
		}
		break;
	}

	/* Check for IP version 9.4.3 with A0 hardware */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
	    !amdgpu_device_get_rev_id(adev)) {
		dev_err(adev->dev, "Unsupported A0 hardware\n");
		return -ENODEV;	/* device unsupported - no device error */
	}

	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) && 2015 ((adev->flags & AMD_IS_APU) == 0) && 2016 !dev_is_removable(&adev->pdev->dev)) 2017 adev->flags |= AMD_IS_PX; 2018 2019 if (!(adev->flags & AMD_IS_APU)) { 2020 parent = pcie_find_root_port(adev->pdev); 2021 adev->has_pr3 = parent ? pci_pr3_present(parent) : false; 2022 } 2023 2024 adev->pm.pp_feature = amdgpu_pp_feature_mask; 2025 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS) 2026 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 2027 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) 2028 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; 2029 if (!amdgpu_device_pcie_dynamic_switching_supported(adev)) 2030 adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK; 2031 2032 adev->virt.is_xgmi_node_migrate_enabled = false; 2033 if (amdgpu_sriov_vf(adev)) { 2034 adev->virt.is_xgmi_node_migrate_enabled = 2035 amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4); 2036 } 2037 2038 total = true; 2039 for (i = 0; i < adev->num_ip_blocks; i++) { 2040 ip_block = &adev->ip_blocks[i]; 2041 2042 if ((amdgpu_ip_block_mask & (1 << i)) == 0) { 2043 dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i, 2044 adev->ip_blocks[i].version->funcs->name); 2045 adev->ip_blocks[i].status.valid = false; 2046 } else if (ip_block->version->funcs->early_init) { 2047 r = ip_block->version->funcs->early_init(ip_block); 2048 if (r == -ENOENT) { 2049 adev->ip_blocks[i].status.valid = false; 2050 } else if (r) { 2051 dev_err(adev->dev, 2052 "early_init of IP block <%s> failed %d\n", 2053 adev->ip_blocks[i].version->funcs->name, 2054 r); 2055 total = false; 2056 } else { 2057 adev->ip_blocks[i].status.valid = true; 2058 } 2059 } else { 2060 adev->ip_blocks[i].status.valid = true; 2061 } 2062 /* get the vbios after the asic_funcs are set up */ 2063 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { 2064 r = amdgpu_device_parse_gpu_info_fw(adev); 2065 if (r) 2066 return r; 2067 2068 bios_flags = amdgpu_device_get_vbios_flags(adev); 2069 skip_bios = !!(bios_flags & AMDGPU_VBIOS_SKIP); 2070 /* Read BIOS */ 2071 if (!skip_bios) { 2072 bool optional = 2073 !!(bios_flags & AMDGPU_VBIOS_OPTIONAL); 2074 if (!amdgpu_get_bios(adev) && !optional) 2075 return -EINVAL; 2076 2077 if (optional && !adev->bios) 2078 dev_info( 2079 adev->dev, 2080 "VBIOS image optional, proceeding without VBIOS image"); 2081 2082 if (adev->bios) { 2083 r = amdgpu_atombios_init(adev); 2084 if (r) { 2085 dev_err(adev->dev, 2086 "amdgpu_atombios_init failed\n"); 2087 amdgpu_vf_error_put( 2088 adev, 2089 AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 2090 0, 0); 2091 return r; 2092 } 2093 } 2094 } 2095 2096 /*get pf2vf msg info at it's earliest time*/ 2097 if (amdgpu_sriov_vf(adev)) 2098 amdgpu_virt_init_data_exchange(adev); 2099 2100 } 2101 } 2102 if (!total) 2103 return -ENODEV; 2104 2105 if (adev->gmc.xgmi.supported) 2106 amdgpu_xgmi_early_init(adev); 2107 2108 if (amdgpu_is_multi_aid(adev)) 2109 amdgpu_uid_init(adev); 2110 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); 2111 if (ip_block->status.valid != false) 2112 amdgpu_amdkfd_device_probe(adev); 2113 2114 adev->cg_flags &= amdgpu_cg_mask; 2115 adev->pg_flags &= amdgpu_pg_mask; 2116 2117 return 0; 2118 } 2119 2120 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev) 2121 { 2122 int i, r; 2123 2124 for (i = 0; i < adev->num_ip_blocks; i++) { 2125 if (!adev->ip_blocks[i].status.sw) 2126 continue; 2127 if (adev->ip_blocks[i].status.hw) 2128 continue; 2129 if (!amdgpu_ip_member_of_hwini( 2130 adev, 
adev->ip_blocks[i].version->type)) 2131 continue; 2132 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 2133 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || 2134 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { 2135 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); 2136 if (r) { 2137 dev_err(adev->dev, 2138 "hw_init of IP block <%s> failed %d\n", 2139 adev->ip_blocks[i].version->funcs->name, 2140 r); 2141 return r; 2142 } 2143 adev->ip_blocks[i].status.hw = true; 2144 } 2145 } 2146 2147 return 0; 2148 } 2149 2150 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev) 2151 { 2152 int i, r; 2153 2154 for (i = 0; i < adev->num_ip_blocks; i++) { 2155 if (!adev->ip_blocks[i].status.sw) 2156 continue; 2157 if (adev->ip_blocks[i].status.hw) 2158 continue; 2159 if (!amdgpu_ip_member_of_hwini( 2160 adev, adev->ip_blocks[i].version->type)) 2161 continue; 2162 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); 2163 if (r) { 2164 dev_err(adev->dev, 2165 "hw_init of IP block <%s> failed %d\n", 2166 adev->ip_blocks[i].version->funcs->name, r); 2167 return r; 2168 } 2169 adev->ip_blocks[i].status.hw = true; 2170 } 2171 2172 return 0; 2173 } 2174 2175 static int amdgpu_device_fw_loading(struct amdgpu_device *adev) 2176 { 2177 int r = 0; 2178 int i; 2179 uint32_t smu_version; 2180 2181 if (adev->asic_type >= CHIP_VEGA10) { 2182 for (i = 0; i < adev->num_ip_blocks; i++) { 2183 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) 2184 continue; 2185 2186 if (!amdgpu_ip_member_of_hwini(adev, 2187 AMD_IP_BLOCK_TYPE_PSP)) 2188 break; 2189 2190 if (!adev->ip_blocks[i].status.sw) 2191 continue; 2192 2193 /* no need to do the fw loading again if already done*/ 2194 if (adev->ip_blocks[i].status.hw == true) 2195 break; 2196 2197 if (amdgpu_in_reset(adev) || adev->in_suspend) { 2198 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); 2199 if (r) 2200 return r; 2201 } else { 2202 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); 2203 if (r) { 2204 dev_err(adev->dev, 2205 "hw_init of IP block <%s> failed %d\n", 2206 adev->ip_blocks[i] 2207 .version->funcs->name, 2208 r); 2209 return r; 2210 } 2211 adev->ip_blocks[i].status.hw = true; 2212 } 2213 break; 2214 } 2215 } 2216 2217 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) 2218 r = amdgpu_pm_load_smu_firmware(adev, &smu_version); 2219 2220 return r; 2221 } 2222 2223 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) 2224 { 2225 struct drm_sched_init_args args = { 2226 .ops = &amdgpu_sched_ops, 2227 .num_rqs = DRM_SCHED_PRIORITY_COUNT, 2228 .timeout_wq = adev->reset_domain->wq, 2229 .dev = adev->dev, 2230 }; 2231 long timeout; 2232 int r, i; 2233 2234 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 2235 struct amdgpu_ring *ring = adev->rings[i]; 2236 2237 /* No need to setup the GPU scheduler for rings that don't need it */ 2238 if (!ring || ring->no_scheduler) 2239 continue; 2240 2241 switch (ring->funcs->type) { 2242 case AMDGPU_RING_TYPE_GFX: 2243 timeout = adev->gfx_timeout; 2244 break; 2245 case AMDGPU_RING_TYPE_COMPUTE: 2246 timeout = adev->compute_timeout; 2247 break; 2248 case AMDGPU_RING_TYPE_SDMA: 2249 timeout = adev->sdma_timeout; 2250 break; 2251 default: 2252 timeout = adev->video_timeout; 2253 break; 2254 } 2255 2256 args.timeout = timeout; 2257 args.credit_limit = ring->num_hw_submission; 2258 args.score = ring->sched_score; 2259 args.name = ring->name; 2260 2261 r = 
drm_sched_init(&ring->sched, &args); 2262 if (r) { 2263 dev_err(adev->dev, 2264 "Failed to create scheduler on ring %s.\n", 2265 ring->name); 2266 return r; 2267 } 2268 r = amdgpu_uvd_entity_init(adev, ring); 2269 if (r) { 2270 dev_err(adev->dev, 2271 "Failed to create UVD scheduling entity on ring %s.\n", 2272 ring->name); 2273 return r; 2274 } 2275 r = amdgpu_vce_entity_init(adev, ring); 2276 if (r) { 2277 dev_err(adev->dev, 2278 "Failed to create VCE scheduling entity on ring %s.\n", 2279 ring->name); 2280 return r; 2281 } 2282 } 2283 2284 if (adev->xcp_mgr) 2285 amdgpu_xcp_update_partition_sched_list(adev); 2286 2287 return 0; 2288 } 2289 2290 2291 /** 2292 * amdgpu_device_ip_init - run init for hardware IPs 2293 * 2294 * @adev: amdgpu_device pointer 2295 * 2296 * Main initialization pass for hardware IPs. The list of all the hardware 2297 * IPs that make up the asic is walked and the sw_init and hw_init callbacks 2298 * are run. sw_init initializes the software state associated with each IP 2299 * and hw_init initializes the hardware associated with each IP. 2300 * Returns 0 on success, negative error code on failure. 2301 */ 2302 static int amdgpu_device_ip_init(struct amdgpu_device *adev) 2303 { 2304 bool init_badpage; 2305 int i, r; 2306 2307 r = amdgpu_ras_init(adev); 2308 if (r) 2309 return r; 2310 2311 for (i = 0; i < adev->num_ip_blocks; i++) { 2312 if (!adev->ip_blocks[i].status.valid) 2313 continue; 2314 if (adev->ip_blocks[i].version->funcs->sw_init) { 2315 r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]); 2316 if (r) { 2317 dev_err(adev->dev, 2318 "sw_init of IP block <%s> failed %d\n", 2319 adev->ip_blocks[i].version->funcs->name, 2320 r); 2321 goto init_failed; 2322 } 2323 } 2324 adev->ip_blocks[i].status.sw = true; 2325 2326 if (!amdgpu_ip_member_of_hwini( 2327 adev, adev->ip_blocks[i].version->type)) 2328 continue; 2329 2330 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { 2331 /* need to do common hw init early so everything is set up for gmc */ 2332 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); 2333 if (r) { 2334 dev_err(adev->dev, "hw_init %d failed %d\n", i, 2335 r); 2336 goto init_failed; 2337 } 2338 adev->ip_blocks[i].status.hw = true; 2339 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 2340 /* need to do gmc hw init early so we can allocate gpu mem */ 2341 /* Try to reserve bad pages early */ 2342 if (amdgpu_sriov_vf(adev)) 2343 amdgpu_virt_exchange_data(adev); 2344 2345 r = amdgpu_device_mem_scratch_init(adev); 2346 if (r) { 2347 dev_err(adev->dev, 2348 "amdgpu_mem_scratch_init failed %d\n", 2349 r); 2350 goto init_failed; 2351 } 2352 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); 2353 if (r) { 2354 dev_err(adev->dev, "hw_init %d failed %d\n", i, 2355 r); 2356 goto init_failed; 2357 } 2358 r = amdgpu_device_wb_init(adev); 2359 if (r) { 2360 dev_err(adev->dev, 2361 "amdgpu_device_wb_init failed %d\n", r); 2362 goto init_failed; 2363 } 2364 adev->ip_blocks[i].status.hw = true; 2365 2366 /* right after GMC hw init, we create CSA */ 2367 if (adev->gfx.mcbp) { 2368 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, 2369 AMDGPU_GEM_DOMAIN_VRAM | 2370 AMDGPU_GEM_DOMAIN_GTT, 2371 AMDGPU_CSA_SIZE); 2372 if (r) { 2373 dev_err(adev->dev, 2374 "allocate CSA failed %d\n", r); 2375 goto init_failed; 2376 } 2377 } 2378 2379 r = amdgpu_seq64_init(adev); 2380 if (r) { 2381 dev_err(adev->dev, "allocate seq64 failed %d\n", 2382 r); 2383 goto init_failed; 2384 } 2385 } 
2386 } 2387 2388 if (amdgpu_sriov_vf(adev)) 2389 amdgpu_virt_init_data_exchange(adev); 2390 2391 r = amdgpu_ib_pool_init(adev); 2392 if (r) { 2393 dev_err(adev->dev, "IB initialization failed (%d).\n", r); 2394 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); 2395 goto init_failed; 2396 } 2397 2398 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init is complete */ 2399 if (r) 2400 goto init_failed; 2401 2402 r = amdgpu_device_ip_hw_init_phase1(adev); 2403 if (r) 2404 goto init_failed; 2405 2406 r = amdgpu_device_fw_loading(adev); 2407 if (r) 2408 goto init_failed; 2409 2410 r = amdgpu_device_ip_hw_init_phase2(adev); 2411 if (r) 2412 goto init_failed; 2413 2414 /* 2415 * retired pages will be loaded from eeprom and reserved here; 2416 * it should be called after amdgpu_device_ip_hw_init_phase2 since 2417 * for some ASICs the RAS EEPROM code relies on the SMU being fully 2418 * functional for I2C communication, which is only true at this point. 2419 * 2420 * amdgpu_ras_recovery_init may fail, but the upper layer only cares 2421 * about failures from a bad gpu situation and stops the amdgpu init 2422 * process accordingly. For other failures, it still releases all 2423 * the resources and prints an error message, rather than returning a 2424 * negative value to the upper level. 2425 * 2426 * Note: theoretically, this should be called before all vram allocations 2427 * to protect retired pages from being abused. 2428 */ 2429 init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI); 2430 r = amdgpu_ras_recovery_init(adev, init_badpage); 2431 if (r) 2432 goto init_failed; 2433 2434 /* 2435 * In case of XGMI, grab an extra reference on the reset domain for this device. 2436 */ 2437 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2438 if (amdgpu_xgmi_add_device(adev) == 0) { 2439 if (!amdgpu_sriov_vf(adev)) { 2440 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 2441 2442 if (WARN_ON(!hive)) { 2443 r = -ENOENT; 2444 goto init_failed; 2445 } 2446 2447 if (!hive->reset_domain || 2448 !amdgpu_reset_get_reset_domain(hive->reset_domain)) { 2449 r = -ENOENT; 2450 amdgpu_put_xgmi_hive(hive); 2451 goto init_failed; 2452 } 2453 2454 /* Drop the early temporary reset domain we created for device */ 2455 amdgpu_reset_put_reset_domain(adev->reset_domain); 2456 adev->reset_domain = hive->reset_domain; 2457 amdgpu_put_xgmi_hive(hive); 2458 } 2459 } 2460 } 2461 2462 r = amdgpu_device_init_schedulers(adev); 2463 if (r) 2464 goto init_failed; 2465 2466 amdgpu_ttm_set_buffer_funcs_status(adev, true); 2467 2468 /* Don't init kfd if the whole hive needs to be reset during init */ 2469 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { 2470 amdgpu_amdkfd_device_init(adev); 2471 } 2472 2473 amdgpu_fru_get_product_info(adev); 2474 2475 r = amdgpu_cper_init(adev); 2476 2477 init_failed: 2478 2479 return r; 2480 } 2481 2482 /** 2483 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer 2484 * 2485 * @adev: amdgpu_device pointer 2486 * 2487 * Writes a reset magic value to the gart pointer in VRAM. The driver calls 2488 * this function before a GPU reset. If the value is retained after a 2489 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2490 */ 2491 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev) 2492 { 2493 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); 2494 } 2495 2496 /** 2497 * amdgpu_device_check_vram_lost - check if vram is valid 2498 * 2499 * @adev: amdgpu_device pointer 2500 * 2501 * Checks the reset magic value written to the gart pointer in VRAM. 2502 * The driver calls this after a GPU reset to see if the contents of 2503 * VRAM are lost or not. 2504 * Returns true if vram is lost, false if not. 2505 */ 2506 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) 2507 { 2508 if (memcmp(adev->gart.ptr, adev->reset_magic, 2509 AMDGPU_RESET_MAGIC_NUM)) 2510 return true; 2511 2512 if (!amdgpu_in_reset(adev)) 2513 return false; 2514 2515 /* 2516 * For all ASICs with baco/mode1 reset, the VRAM is 2517 * always assumed to be lost. 2518 */ 2519 switch (amdgpu_asic_reset_method(adev)) { 2520 case AMD_RESET_METHOD_LEGACY: 2521 case AMD_RESET_METHOD_LINK: 2522 case AMD_RESET_METHOD_BACO: 2523 case AMD_RESET_METHOD_MODE1: 2524 return true; 2525 default: 2526 return false; 2527 } 2528 }
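/*
 * The two helpers above are meant to be used as a pair around a reset; a
 * minimal sketch of a caller (illustrative only, the real call sites live
 * in the reset and resume paths of this file):
 */
static inline bool example_vram_lost_after_reset(struct amdgpu_device *adev)
{
	amdgpu_device_fill_reset_magic(adev);	/* snapshot the magic before reset */

	if (amdgpu_asic_reset(adev))
		return true;			/* failed reset, assume the worst */

	/* a magic mismatch after the reset means VRAM contents were lost */
	return amdgpu_device_check_vram_lost(adev);
}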
2529 2530 /** 2531 * amdgpu_device_set_cg_state - set clockgating for amdgpu device 2532 * 2533 * @adev: amdgpu_device pointer 2534 * @state: clockgating state (gate or ungate) 2535 * 2536 * The list of all the hardware IPs that make up the asic is walked and the 2537 * set_clockgating_state callbacks are run. 2538 * On late init, this pass enables clockgating for hardware IPs; on fini or 2539 * suspend, it disables clockgating for hardware IPs. 2540 * Returns 0 on success, negative error code on failure. 2541 */ 2542 2543 int amdgpu_device_set_cg_state(struct amdgpu_device *adev, 2544 enum amd_clockgating_state state) 2545 { 2546 int i, j, r; 2547 2548 if (amdgpu_emu_mode == 1) 2549 return 0; 2550 2551 for (j = 0; j < adev->num_ip_blocks; j++) { 2552 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 2553 if (!adev->ip_blocks[i].status.late_initialized) 2554 continue; 2555 if (!adev->ip_blocks[i].version) 2556 continue; 2557 /* skip CG for GFX, SDMA on S0ix */ 2558 if (adev->in_s0ix && 2559 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || 2560 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) 2561 continue; 2562 /* skip CG for VCE/UVD, it's handled specially */ 2563 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 2564 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && 2565 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && 2566 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && 2567 adev->ip_blocks[i].version->funcs->set_clockgating_state) { 2568 /* enable clockgating to save power */ 2569 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i], 2570 state); 2571 if (r) { 2572 dev_err(adev->dev, 2573 "set_clockgating_state(gate) of IP block <%s> failed %d\n", 2574 adev->ip_blocks[i].version->funcs->name, 2575 r); 2576 return r; 2577 } 2578 } 2579 } 2580 2581 return 0; 2582 } 2583 2584 int amdgpu_device_set_pg_state(struct amdgpu_device *adev, 2585 enum amd_powergating_state state) 2586 { 2587 int i, j, r; 2588 2589 if (amdgpu_emu_mode == 1) 2590 return 0; 2591 2592 for (j = 0; j < adev->num_ip_blocks; j++) { 2593 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 2594 if (!adev->ip_blocks[i].status.late_initialized) 2595 continue; 2596 if (!adev->ip_blocks[i].version) 2597 continue; 2598 /* skip PG for GFX, SDMA on S0ix */ 2599 if (adev->in_s0ix && 2600 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || 2601 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) 2602 continue; 2603 /* skip PG for VCE/UVD, it's handled specially */ 2604 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 2605 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && 2606 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && 2607 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && 2608 adev->ip_blocks[i].version->funcs->set_powergating_state) { 2609 /* enable powergating to save power */ 2610 r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i], 2611 state); 2612 if (r) { 2613 dev_err(adev->dev, 2614 "set_powergating_state(gate) of IP block <%s> failed %d\n", 2615 adev->ip_blocks[i].version->funcs->name, 2616 r); 2617 return r; 2618 } 2619 } 2620 } 2621 return 0; 2622 }
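/*
 * Both walkers above derive the index so that gating runs in IP list order
 * while ungating runs in reverse order, e.g. with num_ip_blocks == 3:
 *
 *	GATE:	j = 0,1,2  ->  i = 0,1,2
 *	UNGATE:	j = 0,1,2  ->  i = 2,1,0
 *
 * so no block is ungated before the blocks that were gated after it.
 */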
2623 2624 static int amdgpu_device_enable_mgpu_fan_boost(void) 2625 { 2626 struct amdgpu_gpu_instance *gpu_ins; 2627 struct amdgpu_device *adev; 2628 int i, ret = 0; 2629 2630 mutex_lock(&mgpu_info.mutex); 2631 2632 /* 2633 * MGPU fan boost feature should be enabled 2634 * only when there are two or more dGPUs in 2635 * the system 2636 */ 2637 if (mgpu_info.num_dgpu < 2) 2638 goto out; 2639 2640 for (i = 0; i < mgpu_info.num_dgpu; i++) { 2641 gpu_ins = &(mgpu_info.gpu_ins[i]); 2642 adev = gpu_ins->adev; 2643 if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) && 2644 !gpu_ins->mgpu_fan_enabled) { 2645 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev); 2646 if (ret) 2647 break; 2648 2649 gpu_ins->mgpu_fan_enabled = 1; 2650 } 2651 } 2652 2653 out: 2654 mutex_unlock(&mgpu_info.mutex); 2655 2656 return ret; 2657 } 2658 2659 /** 2660 * amdgpu_device_ip_late_init - run late init for hardware IPs 2661 * 2662 * @adev: amdgpu_device pointer 2663 * 2664 * Late initialization pass for hardware IPs. The list of all the hardware 2665 * IPs that make up the asic is walked and the late_init callbacks are run. 2666 * late_init covers any special initialization that an IP requires 2667 * after all of the IPs have been initialized or something that needs to happen 2668 * late in the init process. 2669 * Returns 0 on success, negative error code on failure. 2670 */ 2671 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) 2672 { 2673 struct amdgpu_gpu_instance *gpu_instance; 2674 int i = 0, r; 2675 2676 for (i = 0; i < adev->num_ip_blocks; i++) { 2677 if (!adev->ip_blocks[i].status.hw) 2678 continue; 2679 if (adev->ip_blocks[i].version->funcs->late_init) { 2680 r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]); 2681 if (r) { 2682 dev_err(adev->dev, 2683 "late_init of IP block <%s> failed %d\n", 2684 adev->ip_blocks[i].version->funcs->name, 2685 r); 2686 return r; 2687 } 2688 } 2689 adev->ip_blocks[i].status.late_initialized = true; 2690 } 2691 2692 r = amdgpu_ras_late_init(adev); 2693 if (r) { 2694 dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r); 2695 return r; 2696 } 2697 2698 if (!amdgpu_reset_in_recovery(adev)) 2699 amdgpu_ras_set_error_query_ready(adev, true); 2700 2701 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); 2702 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); 2703 2704 amdgpu_device_fill_reset_magic(adev); 2705 2706 r = amdgpu_device_enable_mgpu_fan_boost(); 2707 if (r) 2708 dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r); 2709 2710 /* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */ 2711 if (amdgpu_passthrough(adev) && 2712 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) || 2713 adev->asic_type == CHIP_ALDEBARAN)) 2714 amdgpu_dpm_handle_passthrough_sbr(adev, true); 2715 2716 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2717 mutex_lock(&mgpu_info.mutex); 2718 2719 /* 2720 * Reset device p-state to low as this was booted with high. 2721 * 2722 * This should be performed only after all devices from the same 2723 * hive get initialized. 2724 * 2725 * However, the number of devices in the hive is not known in 2726 * advance, as it is counted one by one during device initialization. 2727 * 2728 * So, we wait for all XGMI interlinked devices to be initialized. 2729 * This may bring some delays as those devices may come from 2730 * different hives. But that should be OK.
2731 */ 2732 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { 2733 for (i = 0; i < mgpu_info.num_gpu; i++) { 2734 gpu_instance = &(mgpu_info.gpu_ins[i]); 2735 if (gpu_instance->adev->flags & AMD_IS_APU) 2736 continue; 2737 2738 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 2739 AMDGPU_XGMI_PSTATE_MIN); 2740 if (r) { 2741 dev_err(adev->dev, 2742 "pstate setting failed (%d).\n", 2743 r); 2744 break; 2745 } 2746 } 2747 } 2748 2749 mutex_unlock(&mgpu_info.mutex); 2750 } 2751 2752 return 0; 2753 } 2754 2755 static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block) 2756 { 2757 struct amdgpu_device *adev = ip_block->adev; 2758 int r; 2759 2760 if (!ip_block->version->funcs->hw_fini) { 2761 dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n", 2762 ip_block->version->funcs->name); 2763 } else { 2764 r = ip_block->version->funcs->hw_fini(ip_block); 2765 /* XXX handle errors */ 2766 if (r) { 2767 dev_dbg(adev->dev, 2768 "hw_fini of IP block <%s> failed %d\n", 2769 ip_block->version->funcs->name, r); 2770 } 2771 } 2772 2773 ip_block->status.hw = false; 2774 } 2775 2776 /** 2777 * amdgpu_device_smu_fini_early - smu hw_fini wrapper 2778 * 2779 * @adev: amdgpu_device pointer 2780 * 2781 * For ASICs need to disable SMC first 2782 */ 2783 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev) 2784 { 2785 int i; 2786 2787 if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) 2788 return; 2789 2790 for (i = 0; i < adev->num_ip_blocks; i++) { 2791 if (!adev->ip_blocks[i].status.hw) 2792 continue; 2793 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 2794 amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]); 2795 break; 2796 } 2797 } 2798 } 2799 2800 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) 2801 { 2802 int i, r; 2803 2804 for (i = 0; i < adev->num_ip_blocks; i++) { 2805 if (!adev->ip_blocks[i].version) 2806 continue; 2807 if (!adev->ip_blocks[i].version->funcs->early_fini) 2808 continue; 2809 2810 r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]); 2811 if (r) { 2812 dev_dbg(adev->dev, 2813 "early_fini of IP block <%s> failed %d\n", 2814 adev->ip_blocks[i].version->funcs->name, r); 2815 } 2816 } 2817 2818 amdgpu_amdkfd_suspend(adev, true); 2819 amdgpu_amdkfd_teardown_processes(adev); 2820 amdgpu_userq_suspend(adev); 2821 2822 /* Workaround for ASICs need to disable SMC first */ 2823 amdgpu_device_smu_fini_early(adev); 2824 2825 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2826 if (!adev->ip_blocks[i].status.hw) 2827 continue; 2828 2829 amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]); 2830 } 2831 2832 if (amdgpu_sriov_vf(adev)) { 2833 if (amdgpu_virt_release_full_gpu(adev, false)) 2834 dev_err(adev->dev, 2835 "failed to release exclusive mode on fini\n"); 2836 } 2837 2838 /* 2839 * Driver reload on the APU can fail due to firmware validation because 2840 * the PSP is always running, as it is shared across the whole SoC. 2841 * This same issue does not occur on dGPU because it has a mechanism 2842 * that checks whether the PSP is running. A solution for those issues 2843 * in the APU is to trigger a GPU reset, but this should be done during 2844 * the unload phase to avoid adding boot latency and screen flicker. 
2845 */ 2846 if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu) { 2847 r = amdgpu_asic_reset(adev); 2848 if (r) 2849 dev_err(adev->dev, "asic reset on %s failed\n", __func__); 2850 } 2851 2852 return 0; 2853 } 2854 2855 /** 2856 * amdgpu_device_ip_fini - run fini for hardware IPs 2857 * 2858 * @adev: amdgpu_device pointer 2859 * 2860 * Main teardown pass for hardware IPs. The list of all the hardware 2861 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks 2862 * are run. hw_fini tears down the hardware associated with each IP 2863 * and sw_fini tears down any software state associated with each IP. 2864 * Returns 0 on success, negative error code on failure. 2865 */ 2866 static int amdgpu_device_ip_fini(struct amdgpu_device *adev) 2867 { 2868 int i, r; 2869 2870 amdgpu_cper_fini(adev); 2871 2872 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) 2873 amdgpu_virt_release_ras_err_handler_data(adev); 2874 2875 if (adev->gmc.xgmi.num_physical_nodes > 1) 2876 amdgpu_xgmi_remove_device(adev); 2877 2878 amdgpu_amdkfd_device_fini_sw(adev); 2879 2880 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2881 if (!adev->ip_blocks[i].status.sw) 2882 continue; 2883 2884 if (!adev->ip_blocks[i].version) 2885 continue; 2886 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 2887 amdgpu_ucode_free_bo(adev); 2888 amdgpu_free_static_csa(&adev->virt.csa_obj); 2889 amdgpu_device_wb_fini(adev); 2890 amdgpu_device_mem_scratch_fini(adev); 2891 amdgpu_ib_pool_fini(adev); 2892 amdgpu_seq64_fini(adev); 2893 amdgpu_doorbell_fini(adev); 2894 } 2895 if (adev->ip_blocks[i].version->funcs->sw_fini) { 2896 r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]); 2897 /* XXX handle errors */ 2898 if (r) { 2899 dev_dbg(adev->dev, 2900 "sw_fini of IP block <%s> failed %d\n", 2901 adev->ip_blocks[i].version->funcs->name, 2902 r); 2903 } 2904 } 2905 adev->ip_blocks[i].status.sw = false; 2906 adev->ip_blocks[i].status.valid = false; 2907 } 2908 2909 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2910 if (!adev->ip_blocks[i].status.late_initialized) 2911 continue; 2912 if (!adev->ip_blocks[i].version) 2913 continue; 2914 if (adev->ip_blocks[i].version->funcs->late_fini) 2915 adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]); 2916 adev->ip_blocks[i].status.late_initialized = false; 2917 } 2918 2919 amdgpu_ras_fini(adev); 2920 amdgpu_uid_fini(adev); 2921 2922 return 0; 2923 } 2924 2925 /** 2926 * amdgpu_device_delayed_init_work_handler - work handler for IB tests 2927 * 2928 * @work: work_struct. 2929 */ 2930 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work) 2931 { 2932 struct amdgpu_device *adev = 2933 container_of(work, struct amdgpu_device, delayed_init_work.work); 2934 int r; 2935 2936 r = amdgpu_ib_ring_tests(adev); 2937 if (r) 2938 dev_err(adev->dev, "ib ring test failed (%d).\n", r); 2939 } 2940 2941 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) 2942 { 2943 struct amdgpu_device *adev = 2944 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); 2945 2946 WARN_ON_ONCE(adev->gfx.gfx_off_state); 2947 WARN_ON_ONCE(adev->gfx.gfx_off_req_count); 2948 2949 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0)) 2950 adev->gfx.gfx_off_state = true; 2951 } 2952 2953 /** 2954 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1) 2955 * 2956 * @adev: amdgpu_device pointer 2957 * 2958 * Main suspend function for hardware IPs. 
The list of all the hardware 2959 * IPs that make up the asic is walked, clockgating is disabled and the 2960 * suspend callbacks are run. suspend puts the hardware and software state 2961 * in each IP into a state suitable for suspend. 2962 * Returns 0 on success, negative error code on failure. 2963 */ 2964 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) 2965 { 2966 int i, r, rec; 2967 2968 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2969 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2970 2971 /* 2972 * Per PMFW team's suggestion, driver needs to handle gfxoff 2973 * and df cstate features disablement for gpu reset(e.g. Mode1Reset) 2974 * scenario. Add the missing df cstate disablement here. 2975 */ 2976 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) 2977 dev_warn(adev->dev, "Failed to disallow df cstate"); 2978 2979 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2980 if (!adev->ip_blocks[i].status.valid) 2981 continue; 2982 2983 /* displays are handled separately */ 2984 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE) 2985 continue; 2986 2987 r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); 2988 if (r) 2989 goto unwind; 2990 } 2991 2992 return 0; 2993 unwind: 2994 rec = amdgpu_device_ip_resume_phase3(adev); 2995 if (rec) 2996 dev_err(adev->dev, 2997 "amdgpu_device_ip_resume_phase3 failed during unwind: %d\n", 2998 rec); 2999 3000 amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW); 3001 3002 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); 3003 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); 3004 3005 return r; 3006 } 3007 3008 /** 3009 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2) 3010 * 3011 * @adev: amdgpu_device pointer 3012 * 3013 * Main suspend function for hardware IPs. The list of all the hardware 3014 * IPs that make up the asic is walked, clockgating is disabled and the 3015 * suspend callbacks are run. suspend puts the hardware and software state 3016 * in each IP into a state suitable for suspend. 3017 * Returns 0 on success, negative error code on failure. 3018 */ 3019 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) 3020 { 3021 int i, r, rec; 3022 3023 if (adev->in_s0ix) 3024 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry); 3025 3026 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 3027 if (!adev->ip_blocks[i].status.valid) 3028 continue; 3029 /* displays are handled in phase1 */ 3030 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) 3031 continue; 3032 /* PSP lost connection when err_event_athub occurs */ 3033 if (amdgpu_ras_intr_triggered() && 3034 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 3035 adev->ip_blocks[i].status.hw = false; 3036 continue; 3037 } 3038 3039 /* skip unnecessary suspend if we do not initialize them yet */ 3040 if (!amdgpu_ip_member_of_hwini( 3041 adev, adev->ip_blocks[i].version->type)) 3042 continue; 3043 3044 /* Since we skip suspend for S0i3, we need to cancel the delayed 3045 * idle work here as the suspend callback never gets called. 3046 */ 3047 if (adev->in_s0ix && 3048 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX && 3049 amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0)) 3050 cancel_delayed_work_sync(&adev->gfx.idle_work); 3051 /* skip suspend of gfx/mes and psp for S0ix 3052 * gfx is in gfxoff state, so on resume it will exit gfxoff just 3053 * like at runtime. PSP is also part of the always on hardware 3054 * so no need to suspend it. 
3055 */ 3056 if (adev->in_s0ix && 3057 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || 3058 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || 3059 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES)) 3060 continue; 3061 3062 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */ 3063 if (adev->in_s0ix && 3064 (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >= 3065 IP_VERSION(5, 0, 0)) && 3066 (adev->ip_blocks[i].version->type == 3067 AMD_IP_BLOCK_TYPE_SDMA)) 3068 continue; 3069 3070 /* swPSP provides the IMU and RLC FW binaries to TOS once during cold-boot. 3071 * These are in TMR, hence are expected to be reused by PSP-TOS to reload 3072 * from this location, and RLC Autoload also gets loaded automatically 3073 * from here based on the PMFW -> PSP message during the re-init sequence. 3074 * Therefore, the psp suspend & resume should be skipped to avoid destroying 3075 * the TMR and reloading FWs again for IMU enabled APU ASICs. 3076 */ 3077 if (amdgpu_in_reset(adev) && 3078 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs && 3079 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) 3080 continue; 3081 3082 r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); 3083 if (r) 3084 goto unwind; 3085 3086 /* handle putting the SMC in the appropriate state */ 3087 if (!amdgpu_sriov_vf(adev)) { 3088 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 3089 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); 3090 if (r) { 3091 dev_err(adev->dev, 3092 "SMC failed to set mp1 state %d, %d\n", 3093 adev->mp1_state, r); 3094 goto unwind; 3095 } 3096 } 3097 } 3098 } 3099 3100 return 0; 3101 unwind: 3102 /* unwinding suspend phase 2 = resume phase 1 + resume phase 2 */ 3103 rec = amdgpu_device_ip_resume_phase1(adev); 3104 if (rec) { 3105 dev_err(adev->dev, 3106 "amdgpu_device_ip_resume_phase1 failed during unwind: %d\n", 3107 rec); 3108 return r; 3109 } 3110 3111 rec = amdgpu_device_fw_loading(adev); 3112 if (rec) { 3113 dev_err(adev->dev, 3114 "amdgpu_device_fw_loading failed during unwind: %d\n", 3115 rec); 3116 return r; 3117 } 3118 3119 rec = amdgpu_device_ip_resume_phase2(adev); 3120 if (rec) { 3121 dev_err(adev->dev, 3122 "amdgpu_device_ip_resume_phase2 failed during unwind: %d\n", 3123 rec); 3124 return r; 3125 } 3126 3127 return r; 3128 } 3129 3130 /** 3131 * amdgpu_device_ip_suspend - run suspend for hardware IPs 3132 * 3133 * @adev: amdgpu_device pointer 3134 * 3135 * Main suspend function for hardware IPs. The list of all the hardware 3136 * IPs that make up the asic is walked, clockgating is disabled and the 3137 * suspend callbacks are run. suspend puts the hardware and software state 3138 * in each IP into a state suitable for suspend. 3139 * Returns 0 on success, negative error code on failure.
3140 */ 3141 static int amdgpu_device_ip_suspend(struct amdgpu_device *adev) 3142 { 3143 int r; 3144 3145 if (amdgpu_sriov_vf(adev)) { 3146 amdgpu_virt_fini_data_exchange(adev); 3147 amdgpu_virt_request_full_gpu(adev, false); 3148 } 3149 3150 amdgpu_ttm_set_buffer_funcs_status(adev, false); 3151 3152 r = amdgpu_device_ip_suspend_phase1(adev); 3153 if (r) 3154 return r; 3155 r = amdgpu_device_ip_suspend_phase2(adev); 3156 3157 if (amdgpu_sriov_vf(adev)) 3158 amdgpu_virt_release_full_gpu(adev, false); 3159 3160 return r; 3161 } 3162 3163 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) 3164 { 3165 int i, r; 3166 3167 static enum amd_ip_block_type ip_order[] = { 3168 AMD_IP_BLOCK_TYPE_COMMON, 3169 AMD_IP_BLOCK_TYPE_GMC, 3170 AMD_IP_BLOCK_TYPE_PSP, 3171 AMD_IP_BLOCK_TYPE_IH, 3172 }; 3173 3174 for (i = 0; i < adev->num_ip_blocks; i++) { 3175 int j; 3176 struct amdgpu_ip_block *block; 3177 3178 block = &adev->ip_blocks[i]; 3179 block->status.hw = false; 3180 3181 for (j = 0; j < ARRAY_SIZE(ip_order); j++) { 3182 3183 if (block->version->type != ip_order[j] || 3184 !block->status.valid) 3185 continue; 3186 3187 r = block->version->funcs->hw_init(&adev->ip_blocks[i]); 3188 if (r) { 3189 dev_err(adev->dev, "RE-INIT-early: %s failed\n", 3190 block->version->funcs->name); 3191 return r; 3192 } 3193 block->status.hw = true; 3194 } 3195 } 3196 3197 return 0; 3198 } 3199 3200 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) 3201 { 3202 struct amdgpu_ip_block *block; 3203 int i, r = 0; 3204 3205 static enum amd_ip_block_type ip_order[] = { 3206 AMD_IP_BLOCK_TYPE_SMC, 3207 AMD_IP_BLOCK_TYPE_DCE, 3208 AMD_IP_BLOCK_TYPE_GFX, 3209 AMD_IP_BLOCK_TYPE_SDMA, 3210 AMD_IP_BLOCK_TYPE_MES, 3211 AMD_IP_BLOCK_TYPE_UVD, 3212 AMD_IP_BLOCK_TYPE_VCE, 3213 AMD_IP_BLOCK_TYPE_VCN, 3214 AMD_IP_BLOCK_TYPE_JPEG 3215 }; 3216 3217 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 3218 block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]); 3219 3220 if (!block) 3221 continue; 3222 3223 if (block->status.valid && !block->status.hw) { 3224 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) { 3225 r = amdgpu_ip_block_resume(block); 3226 } else { 3227 r = block->version->funcs->hw_init(block); 3228 } 3229 3230 if (r) { 3231 dev_err(adev->dev, "RE-INIT-late: %s failed\n", 3232 block->version->funcs->name); 3233 break; 3234 } 3235 block->status.hw = true; 3236 } 3237 } 3238 3239 return r; 3240 } 3241 3242 /** 3243 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs 3244 * 3245 * @adev: amdgpu_device pointer 3246 * 3247 * First resume function for hardware IPs. The list of all the hardware 3248 * IPs that make up the asic is walked and the resume callbacks are run for 3249 * COMMON, GMC, and IH. resume puts the hardware into a functional state 3250 * after a suspend and updates the software state as necessary. This 3251 * function is also used for restoring the GPU after a GPU reset. 3252 * Returns 0 on success, negative error code on failure. 
3253 */ 3254 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) 3255 { 3256 int i, r; 3257 3258 for (i = 0; i < adev->num_ip_blocks; i++) { 3259 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) 3260 continue; 3261 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3262 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3263 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 3264 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) { 3265 3266 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); 3267 if (r) 3268 return r; 3269 } 3270 } 3271 3272 return 0; 3273 } 3274 3275 /** 3276 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs 3277 * 3278 * @adev: amdgpu_device pointer 3279 * 3280 * Second resume function for hardware IPs. The list of all the hardware 3281 * IPs that make up the asic is walked and the resume callbacks are run for 3282 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a 3283 * functional state after a suspend and updates the software state as 3284 * necessary. This function is also used for restoring the GPU after a GPU 3285 * reset. 3286 * Returns 0 on success, negative error code on failure. 3287 */ 3288 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) 3289 { 3290 int i, r; 3291 3292 for (i = 0; i < adev->num_ip_blocks; i++) { 3293 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) 3294 continue; 3295 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3296 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3297 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 3298 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE || 3299 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) 3300 continue; 3301 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); 3302 if (r) 3303 return r; 3304 } 3305 3306 return 0; 3307 } 3308 3309 /** 3310 * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs 3311 * 3312 * @adev: amdgpu_device pointer 3313 * 3314 * Third resume function for hardware IPs. The list of all the hardware 3315 * IPs that make up the asic is walked and the resume callbacks are run for 3316 * all DCE. resume puts the hardware into a functional state after a suspend 3317 * and updates the software state as necessary. This function is also used 3318 * for restoring the GPU after a GPU reset. 3319 * 3320 * Returns 0 on success, negative error code on failure. 3321 */ 3322 static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev) 3323 { 3324 int i, r; 3325 3326 for (i = 0; i < adev->num_ip_blocks; i++) { 3327 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) 3328 continue; 3329 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) { 3330 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); 3331 if (r) 3332 return r; 3333 } 3334 } 3335 3336 return 0; 3337 } 3338 3339 /** 3340 * amdgpu_device_ip_resume - run resume for hardware IPs 3341 * 3342 * @adev: amdgpu_device pointer 3343 * 3344 * Main resume function for hardware IPs. The hardware IPs 3345 * are split into multiple resume functions because they are 3346 * also used in recovering from a GPU reset and some additional 3347 * steps need to be taken between them. In this case (S3/S4) they are 3348 * run sequentially. 3349 * Returns 0 on success, negative error code on failure.
3350 */ 3351 static int amdgpu_device_ip_resume(struct amdgpu_device *adev) 3352 { 3353 int r; 3354 3355 r = amdgpu_device_ip_resume_phase1(adev); 3356 if (r) 3357 return r; 3358 3359 r = amdgpu_device_fw_loading(adev); 3360 if (r) 3361 return r; 3362 3363 r = amdgpu_device_ip_resume_phase2(adev); 3364 3365 amdgpu_ttm_set_buffer_funcs_status(adev, true); 3366 3367 if (r) 3368 return r; 3369 3370 amdgpu_fence_driver_hw_init(adev); 3371 3372 r = amdgpu_device_ip_resume_phase3(adev); 3373 3374 return r; 3375 } 3376 3377 /** 3378 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV 3379 * 3380 * @adev: amdgpu_device pointer 3381 * 3382 * Query the VBIOS data tables to determine if the board supports SR-IOV. 3383 */ 3384 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) 3385 { 3386 if (amdgpu_sriov_vf(adev)) { 3387 if (adev->is_atom_fw) { 3388 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev)) 3389 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 3390 } else { 3391 if (amdgpu_atombios_has_gpu_virtualization_table(adev)) 3392 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 3393 } 3394 3395 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)) 3396 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0); 3397 } 3398 } 3399 3400 /** 3401 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic 3402 * 3403 * @pdev: pci device context 3404 * @asic_type: AMD asic type 3405 * 3406 * Check if there is DC (new modesetting infrastructure) support for an asic. 3407 * Returns true if DC has support, false if not. 3408 */ 3409 bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev, 3410 enum amd_asic_type asic_type) 3411 { 3412 switch (asic_type) { 3413 #ifdef CONFIG_DRM_AMDGPU_SI 3414 case CHIP_HAINAN: 3415 #endif 3416 case CHIP_TOPAZ: 3417 /* chips with no display hardware */ 3418 return false; 3419 #if defined(CONFIG_DRM_AMD_DC) 3420 case CHIP_TAHITI: 3421 case CHIP_PITCAIRN: 3422 case CHIP_VERDE: 3423 case CHIP_OLAND: 3424 return amdgpu_dc != 0 && IS_ENABLED(CONFIG_DRM_AMD_DC_SI); 3425 default: 3426 return amdgpu_dc != 0; 3427 #else 3428 default: 3429 if (amdgpu_dc > 0) 3430 dev_info_once( 3431 &pdev->dev, 3432 "Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n"); 3433 return false; 3434 #endif 3435 } 3436 } 3437 3438 /** 3439 * amdgpu_device_has_dc_support - check if dc is supported 3440 * 3441 * @adev: amdgpu_device pointer 3442 * 3443 * Returns true for supported, false for not supported 3444 */ 3445 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) 3446 { 3447 if (adev->enable_virtual_display || 3448 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) 3449 return false; 3450 3451 return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type); 3452 } 3453 3454 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) 3455 { 3456 struct amdgpu_device *adev = 3457 container_of(__work, struct amdgpu_device, xgmi_reset_work); 3458 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 3459 3460 /* It's a bug to not have a hive within this function */ 3461 if (WARN_ON(!hive)) 3462 return; 3463 3464 /* 3465 * Use task barrier to synchronize all xgmi reset works across the 3466 * hive. task_barrier_enter and task_barrier_exit will block 3467 * until all the threads running the xgmi reset works reach 3468 * those points. task_barrier_full will do both blocks. 3469 */ 3470 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { 3471 3472 task_barrier_enter(&hive->tb); 3473 adev->asic_reset_res = amdgpu_device_baco_enter(adev); 3474 3475 if (adev->asic_reset_res) 3476 goto fail; 3477 3478 task_barrier_exit(&hive->tb); 3479 adev->asic_reset_res = amdgpu_device_baco_exit(adev); 3480 3481 if (adev->asic_reset_res) 3482 goto fail; 3483 3484 amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB); 3485 } else { 3486 3487 task_barrier_full(&hive->tb); 3488 adev->asic_reset_res = amdgpu_asic_reset(adev); 3489 } 3490 3491 fail: 3492 if (adev->asic_reset_res) 3493 dev_warn(adev->dev, 3494 "ASIC reset failed with error, %d for drm dev, %s", 3495 adev->asic_reset_res, adev_to_drm(adev)->unique); 3496 amdgpu_put_xgmi_hive(hive); 3497 }
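/*
 * The task barrier usage above reduces to the sketch below; step_a() and
 * step_b() are hypothetical placeholders for the BACO enter/exit pair, and
 * every device in the hive runs the same sequence concurrently:
 *
 *	task_barrier_enter(&hive->tb);	// blocks until all nodes arrive
 *	step_a();			// every node starts together
 *	task_barrier_exit(&hive->tb);	// blocks until all nodes are done
 *	step_b();
 */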
3498 3499 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) 3500 { 3501 char buf[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH]; 3502 char *input = buf; 3503 char *timeout_setting = NULL; 3504 int index = 0; 3505 long timeout; 3506 int ret = 0; 3507 3508 /* By default timeout for all queues is 2 sec */ 3509 adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout = 3510 adev->video_timeout = msecs_to_jiffies(2000); 3511 3512 if (!strnlen(amdgpu_lockup_timeout, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) 3513 return 0; 3514 3515 /* 3516 * strsep() destructively modifies its input by replacing delimiters 3517 * with '\0'. Use a stack copy so the global module parameter buffer 3518 * remains intact for multi-GPU systems where this function is called 3519 * once per device. 3520 */ 3521 strscpy(buf, amdgpu_lockup_timeout, sizeof(buf)); 3522 3523 while ((timeout_setting = strsep(&input, ",")) && 3524 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { 3525 ret = kstrtol(timeout_setting, 0, &timeout); 3526 if (ret) 3527 return ret; 3528 3529 if (timeout == 0) { 3530 index++; 3531 continue; 3532 } else if (timeout < 0) { 3533 timeout = MAX_SCHEDULE_TIMEOUT; 3534 dev_warn(adev->dev, "lockup timeout disabled"); 3535 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); 3536 } else { 3537 timeout = msecs_to_jiffies(timeout); 3538 } 3539 3540 switch (index++) { 3541 case 0: 3542 adev->gfx_timeout = timeout; 3543 break; 3544 case 1: 3545 adev->compute_timeout = timeout; 3546 break; 3547 case 2: 3548 adev->sdma_timeout = timeout; 3549 break; 3550 case 3: 3551 adev->video_timeout = timeout; 3552 break; 3553 default: 3554 break; 3555 } 3556 } 3557 3558 /* When only one value is specified, apply it to all queues. */ 3559 if (index == 1) 3560 adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout = 3561 adev->video_timeout = timeout; 3562 3563 return ret; 3564 }
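/*
 * Examples of lockup_timeout strings accepted by the parser above (values
 * in ms; 0 keeps the 2000 ms default, a negative value disables the
 * timeout entirely):
 *
 *	lockup_timeout=10000			all four queue types: 10 s
 *	lockup_timeout=10000,60000,10000,-1	gfx, compute, sdma, video
 */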
3565 3566 /** 3567 * amdgpu_device_check_iommu_direct_map - check if RAM is direct mapped to the GPU 3568 * 3569 * @adev: amdgpu_device pointer 3570 * 3571 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode 3572 */ 3573 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev) 3574 { 3575 struct iommu_domain *domain; 3576 3577 domain = iommu_get_domain_for_dev(adev->dev); 3578 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY) 3579 adev->ram_is_direct_mapped = true; 3580 } 3581 3582 #if defined(CONFIG_HSA_AMD_P2P) 3583 /** 3584 * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled. 3585 * 3586 * @adev: amdgpu_device pointer 3587 * 3588 * Returns true if the IOMMU is remapping the BAR address. 3589 */ 3590 static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev) 3591 { 3592 struct iommu_domain *domain; 3593 3594 domain = iommu_get_domain_for_dev(adev->dev); 3595 if (domain && (domain->type == IOMMU_DOMAIN_DMA || 3596 domain->type == IOMMU_DOMAIN_DMA_FQ)) 3597 return true; 3598 3599 return false; 3600 } 3601 #endif 3602 3603 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev) 3604 { 3605 if (amdgpu_mcbp == 1) 3606 adev->gfx.mcbp = true; 3607 else if (amdgpu_mcbp == 0) 3608 adev->gfx.mcbp = false; 3609 3610 if (amdgpu_sriov_vf(adev)) 3611 adev->gfx.mcbp = true; 3612 3613 if (adev->gfx.mcbp) 3614 dev_info(adev->dev, "MCBP is enabled\n"); 3615 } 3616 3617 static int amdgpu_device_sys_interface_init(struct amdgpu_device *adev) 3618 { 3619 int r; 3620 3621 r = amdgpu_atombios_sysfs_init(adev); 3622 if (r) 3623 drm_err(&adev->ddev, 3624 "registering atombios sysfs failed (%d).\n", r); 3625 3626 r = amdgpu_pm_sysfs_init(adev); 3627 if (r) 3628 dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r); 3629 3630 r = amdgpu_ucode_sysfs_init(adev); 3631 if (r) { 3632 adev->ucode_sysfs_en = false; 3633 dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r); 3634 } else 3635 adev->ucode_sysfs_en = true; 3636 3637 r = amdgpu_device_attr_sysfs_init(adev); 3638 if (r) 3639 dev_err(adev->dev, "Could not create amdgpu device attr\n"); 3640 3641 r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group); 3642 if (r) 3643 dev_err(adev->dev, 3644 "Could not create amdgpu board attributes\n"); 3645 3646 amdgpu_fru_sysfs_init(adev); 3647 amdgpu_reg_state_sysfs_init(adev); 3648 amdgpu_xcp_sysfs_init(adev); 3649 amdgpu_uma_sysfs_init(adev); 3650 3651 return r; 3652 } 3653 3654 static void amdgpu_device_sys_interface_fini(struct amdgpu_device *adev) 3655 { 3656 if (adev->pm.sysfs_initialized) 3657 amdgpu_pm_sysfs_fini(adev); 3658 if (adev->ucode_sysfs_en) 3659 amdgpu_ucode_sysfs_fini(adev); 3660 amdgpu_device_attr_sysfs_fini(adev); 3661 amdgpu_fru_sysfs_fini(adev); 3662 3663 amdgpu_reg_state_sysfs_fini(adev); 3664 amdgpu_xcp_sysfs_fini(adev); 3665 amdgpu_uma_sysfs_fini(adev); 3666 } 3667 3668 /** 3669 * amdgpu_device_init - initialize the driver 3670 * 3671 * @adev: amdgpu_device pointer 3672 * @flags: driver flags 3673 * 3674 * Initializes the driver info and hw (all asics). 3675 * Returns 0 for success or an error on failure. 3676 * Called at driver startup.
3677 */ 3678 int amdgpu_device_init(struct amdgpu_device *adev, 3679 uint32_t flags) 3680 { 3681 struct pci_dev *pdev = adev->pdev; 3682 int r, i; 3683 bool px = false; 3684 u32 max_MBps; 3685 int tmp; 3686 3687 adev->shutdown = false; 3688 adev->flags = flags; 3689 3690 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) 3691 adev->asic_type = amdgpu_force_asic_type; 3692 else 3693 adev->asic_type = flags & AMD_ASIC_MASK; 3694 3695 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; 3696 if (amdgpu_emu_mode == 1) 3697 adev->usec_timeout *= 10; 3698 adev->gmc.gart_size = 512 * 1024 * 1024; 3699 adev->accel_working = false; 3700 adev->num_rings = 0; 3701 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub()); 3702 adev->mman.buffer_funcs = NULL; 3703 adev->mman.buffer_funcs_ring = NULL; 3704 adev->vm_manager.vm_pte_funcs = NULL; 3705 adev->vm_manager.vm_pte_num_scheds = 0; 3706 adev->gmc.gmc_funcs = NULL; 3707 adev->harvest_ip_mask = 0x0; 3708 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 3709 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 3710 3711 amdgpu_reg_access_init(adev); 3712 3713 dev_info( 3714 adev->dev, 3715 "initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 3716 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 3717 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 3718 3719 /* mutex initialization are all done here so we 3720 * can recall function without having locking issues 3721 */ 3722 mutex_init(&adev->firmware.mutex); 3723 mutex_init(&adev->pm.mutex); 3724 mutex_init(&adev->gfx.gpu_clock_mutex); 3725 mutex_init(&adev->srbm_mutex); 3726 mutex_init(&adev->gfx.pipe_reserve_mutex); 3727 mutex_init(&adev->gfx.gfx_off_mutex); 3728 mutex_init(&adev->gfx.partition_mutex); 3729 mutex_init(&adev->grbm_idx_mutex); 3730 mutex_init(&adev->mn_lock); 3731 mutex_init(&adev->virt.vf_errors.lock); 3732 hash_init(adev->mn_hash); 3733 mutex_init(&adev->psp.mutex); 3734 mutex_init(&adev->notifier_lock); 3735 mutex_init(&adev->pm.stable_pstate_ctx_lock); 3736 mutex_init(&adev->benchmark_mutex); 3737 mutex_init(&adev->gfx.reset_sem_mutex); 3738 /* Initialize the mutex for cleaner shader isolation between GFX and compute processes */ 3739 mutex_init(&adev->enforce_isolation_mutex); 3740 for (i = 0; i < MAX_XCP; ++i) { 3741 adev->isolation[i].spearhead = dma_fence_get_stub(); 3742 amdgpu_sync_create(&adev->isolation[i].active); 3743 amdgpu_sync_create(&adev->isolation[i].prev); 3744 } 3745 mutex_init(&adev->gfx.userq_sch_mutex); 3746 mutex_init(&adev->gfx.workload_profile_mutex); 3747 mutex_init(&adev->vcn.workload_profile_mutex); 3748 3749 amdgpu_device_init_apu_flags(adev); 3750 3751 r = amdgpu_device_check_arguments(adev); 3752 if (r) 3753 return r; 3754 3755 spin_lock_init(&adev->mmio_idx_lock); 3756 spin_lock_init(&adev->mm_stats.lock); 3757 spin_lock_init(&adev->virt.rlcg_reg_lock); 3758 spin_lock_init(&adev->wb.lock); 3759 3760 xa_init_flags(&adev->userq_xa, XA_FLAGS_LOCK_IRQ); 3761 3762 INIT_LIST_HEAD(&adev->reset_list); 3763 3764 INIT_LIST_HEAD(&adev->ras_list); 3765 3766 INIT_LIST_HEAD(&adev->pm.od_kobj_list); 3767 3768 xa_init(&adev->userq_doorbell_xa); 3769 3770 INIT_DELAYED_WORK(&adev->delayed_init_work, 3771 amdgpu_device_delayed_init_work_handler); 3772 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, 3773 amdgpu_device_delay_enable_gfx_off); 3774 /* 3775 * Initialize the enforce_isolation work structures for each XCP 3776 * partition. 
This work handler is responsible for enforcing shader 3777 * isolation on AMD GPUs. It counts the number of emitted fences for 3778 * each GFX and compute ring. If there are any fences, it schedules 3779 * the `enforce_isolation_work` to be run after a delay. If there are 3780 * no fences, it signals the Kernel Fusion Driver (KFD) to resume the 3781 * runqueue. 3782 */ 3783 for (i = 0; i < MAX_XCP; i++) { 3784 INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work, 3785 amdgpu_gfx_enforce_isolation_handler); 3786 adev->gfx.enforce_isolation[i].adev = adev; 3787 adev->gfx.enforce_isolation[i].xcp_id = i; 3788 } 3789 3790 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); 3791 INIT_WORK(&adev->userq_reset_work, amdgpu_userq_reset_work); 3792 3793 amdgpu_coredump_init(adev); 3794 3795 adev->gfx.gfx_off_req_count = 1; 3796 adev->gfx.gfx_off_residency = 0; 3797 adev->gfx.gfx_off_entrycount = 0; 3798 adev->pm.ac_power = power_supply_is_system_supplied() > 0; 3799 3800 atomic_set(&adev->throttling_logging_enabled, 1); 3801 /* 3802 * If throttling continues, logging will be performed every minute 3803 * to avoid log flooding. "-1" is subtracted since the thermal 3804 * throttling interrupt comes every second. Thus, the total logging 3805 * interval is 59 seconds(retelimited printk interval) + 1(waiting 3806 * for throttling interrupt) = 60 seconds. 3807 */ 3808 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1); 3809 3810 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE); 3811 3812 /* Registers mapping */ 3813 /* TODO: block userspace mapping of io register */ 3814 if (adev->asic_type >= CHIP_BONAIRE) { 3815 adev->rmmio_base = pci_resource_start(adev->pdev, 5); 3816 adev->rmmio_size = pci_resource_len(adev->pdev, 5); 3817 } else { 3818 adev->rmmio_base = pci_resource_start(adev->pdev, 2); 3819 adev->rmmio_size = pci_resource_len(adev->pdev, 2); 3820 } 3821 3822 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++) 3823 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); 3824 3825 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); 3826 if (!adev->rmmio) 3827 return -ENOMEM; 3828 3829 dev_info(adev->dev, "register mmio base: 0x%08X\n", 3830 (uint32_t)adev->rmmio_base); 3831 dev_info(adev->dev, "register mmio size: %u\n", 3832 (unsigned int)adev->rmmio_size); 3833 3834 /* 3835 * Reset domain needs to be present early, before XGMI hive discovered 3836 * (if any) and initialized to use reset sem and in_gpu reset flag 3837 * early on during init and before calling to RREG32. 3838 */ 3839 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev"); 3840 if (!adev->reset_domain) 3841 return -ENOMEM; 3842 3843 /* detect hw virtualization here */ 3844 amdgpu_virt_init(adev); 3845 3846 amdgpu_device_get_pcie_info(adev); 3847 3848 r = amdgpu_device_get_job_timeout_settings(adev); 3849 if (r) { 3850 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); 3851 return r; 3852 } 3853 3854 amdgpu_device_set_mcbp(adev); 3855 3856 /* 3857 * By default, use default mode where all blocks are expected to be 3858 * initialized. At present a 'swinit' of blocks is required to be 3859 * completed before the need for a different level is detected. 3860 */ 3861 amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT); 3862 /* early init functions */ 3863 r = amdgpu_device_ip_early_init(adev); 3864 if (r) 3865 return r; 3866 3867 /* 3868 * No need to remove conflicting FBs for non-display class devices. 
	/*
	 * No need to remove conflicting FBs for non-display class devices.
	 * This prevents the sysfb from being freed accidentally.
	 */
	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
	    (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
		/* Get rid of things like offb */
		r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
		if (r)
			return r;
	}

	/* Enable TMZ based on IP_VERSION */
	amdgpu_gmc_tmz_set(adev);

	if (amdgpu_sriov_vf(adev) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
		/* VF MMIO access (except mailbox range) from CPU
		 * will be blocked during sriov runtime
		 */
		adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;

	amdgpu_gmc_noretry_set(adev);
	/* Need to get xgmi info early to decide the reset behavior */
	if (adev->gmc.xgmi.supported) {
		if (adev->gfxhub.funcs &&
		    adev->gfxhub.funcs->get_xgmi_info) {
			r = adev->gfxhub.funcs->get_xgmi_info(adev);
			if (r)
				return r;
		}
	}

	if (adev->gmc.xgmi.connected_to_cpu) {
		if (adev->mmhub.funcs &&
		    adev->mmhub.funcs->get_xgmi_info) {
			r = adev->mmhub.funcs->get_xgmi_info(adev);
			if (r)
				return r;
		}
	}

	/* enable PCIE atomic ops */
	if (amdgpu_sriov_vf(adev)) {
		if (adev->virt.fw_reserve.p_pf2vf)
			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
				adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	/* APUs from GFX9 onwards don't rely on PCIe atomics; an internal
	 * path natively supports atomics, so set have_atomics_support to true.
	 */
	} else if ((adev->flags & AMD_IS_APU &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) ||
		   (adev->gmc.xgmi.connected_to_cpu &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 1, 0))) {
		adev->have_atomics_support = true;
	} else {
		adev->have_atomics_support =
			!pci_enable_atomic_ops_to_root(adev->pdev,
						       PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
						       PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	}

	if (!adev->have_atomics_support)
		dev_info(adev->dev, "PCIE atomic ops are not supported\n");

	/* doorbell bar mapping and doorbell index init */
	amdgpu_doorbell_init(adev);

	if (amdgpu_emu_mode == 1) {
		/* post the asic on emulation mode */
		emu_soc_asic_init(adev);
		goto fence_driver_init;
	}

	amdgpu_reset_init(adev);

	/* detect if we are with an SRIOV vbios */
	if (adev->bios)
		amdgpu_device_detect_sriov_bios(adev);
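	/*
	 * Note on the hive case below: nodes of an XGMI hive are not reset
	 * independently. Instead of resetting right here, only the init
	 * level is lowered to MINIMAL_XGMI, and the whole hive is reset in
	 * one go later via amdgpu_xgmi_reset_on_init() once every node has
	 * completed its minimal bring-up.
	 */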
	/* check if we need to reset the asic
	 * E.g., driver was not cleanly unloaded previously, etc.
	 */
	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
		if (adev->gmc.xgmi.num_physical_nodes) {
			dev_info(adev->dev, "Pending hive reset.\n");
			amdgpu_set_init_level(adev,
					      AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
		} else {
			tmp = amdgpu_reset_method;
			/* A default reset should be used when loading or
			 * reloading the driver, regardless of the
			 * reset_method module parameter.
			 */
			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
			r = amdgpu_asic_reset(adev);
			amdgpu_reset_method = tmp;
		}

		if (r) {
			dev_err(adev->dev, "asic reset on init failed\n");
			goto failed;
		}
	}

	/* Post card if necessary */
	if (amdgpu_device_need_post(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		dev_info(adev->dev, "GPU posting now...\n");
		r = amdgpu_device_asic_init(adev);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	}

	if (adev->bios) {
		if (adev->is_atom_fw) {
			/* Initialize clocks */
			r = amdgpu_atomfirmware_get_clock_info(adev);
			if (r) {
				dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
				goto failed;
			}
		} else {
			/* Initialize clocks */
			r = amdgpu_atombios_get_clock_info(adev);
			if (r) {
				dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
				goto failed;
			}
			/* init i2c buses */
			amdgpu_i2c_init(adev);
		}
	}

fence_driver_init:
	/* Fence driver */
	r = amdgpu_fence_driver_sw_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev_to_drm(adev));

	r = amdgpu_device_ip_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
		goto release_ras_con;
	}

	amdgpu_fence_driver_hw_init(adev);

	dev_info(adev->dev,
		 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
		 adev->gfx.config.max_shader_engines,
		 adev->gfx.config.max_sh_per_se,
		 adev->gfx.config.max_cu_per_sh,
		 adev->gfx.cu_info.number);

	adev->accel_working = true;

	amdgpu_vm_check_compute_bug(adev);

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	/*
	 * Register the gpu instance before amdgpu_device_enable_mgpu_fan_boost.
	 * Otherwise the mgpu fan boost feature will be skipped because the
	 * gpu instance count would be too low.
	 */
	amdgpu_register_gpu_instance(adev);

	/* Enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
		r = amdgpu_device_ip_late_init(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
			goto release_ras_con;
		}
		/* must succeed. */
		amdgpu_ras_resume(adev);
		queue_delayed_work(system_wq, &adev->delayed_init_work,
				   msecs_to_jiffies(AMDGPU_RESUME_MS));
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_release_full_gpu(adev, true);
		flush_delayed_work(&adev->delayed_init_work);
	}

	/* Don't init kfd if the whole hive needs to be reset during init */
	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
		kgd2kfd_init_zone_device(adev);
		kfd_update_svm_support_properties(adev);
	}

	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
		amdgpu_xgmi_reset_on_init(adev);

	/*
	 * Register these sysfs interfaces after `late_init`, since some of
	 * the operations performed in `late_init` can affect how they are
	 * created.
	 */
	r = amdgpu_device_sys_interface_init(adev);
	if (r)
		dev_err(adev->dev, "amdgpu_device_sys_interface_init failed\n");

	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
		r = amdgpu_pmu_init(adev);
		if (r)
			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}

	/* Keep the stored PCI config space at hand for restore after a
	 * sudden PCI error.
	 */
	if (amdgpu_device_cache_pci_state(adev->pdev))
		pci_restore_state(pdev);

	/* if we have more than one VGA card, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it
	 */
	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);

	px = amdgpu_device_supports_px(adev);

	if (px || (!dev_is_removable(&adev->pdev->dev) &&
		   apple_gmux_detect(NULL, NULL)))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, px);

	if (px)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	amdgpu_device_check_iommu_direct_map(adev);

	adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
	r = register_pm_notifier(&adev->pm_nb);
	if (r)
		goto failed;

	return 0;

release_ras_con:
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	/* failed in exclusive mode due to timeout */
	if (amdgpu_sriov_vf(adev) &&
	    !amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_mmio_blocked(adev) &&
	    !amdgpu_virt_wait_reset(adev)) {
		dev_err(adev->dev, "VF exclusive mode timeout\n");
		/* Don't send request since VF is inactive. */
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
		adev->virt.ops = NULL;
		r = -EAGAIN;
	}
	amdgpu_release_ras_context(adev);

failed:
	amdgpu_vf_error_trans_all(adev);

	return r;
}

static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
{
	/* Clear all CPU mappings pointing to this device */
	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);

	/* Unmap all mapped bars - Doorbell, registers and VRAM */
	amdgpu_doorbell_fini(adev);

	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
	adev->mman.aper_base_kaddr = NULL;

	/* Memory manager related */
	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
		arch_phys_wc_del(adev->gmc.vram_mtrr);
		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
	}
}

/**
 * amdgpu_device_fini_hw - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini_hw(struct amdgpu_device *adev)
{
	dev_info(adev->dev, "finishing device.\n");
	flush_delayed_work(&adev->delayed_init_work);

	if (adev->mman.initialized)
		drain_workqueue(adev->mman.bdev.wq);
	adev->shutdown = true;

	unregister_pm_notifier(&adev->pm_nb);

	/* make sure IB tests have finished before entering exclusive mode
	 * to avoid preemption on IB test
	 */
	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_request_full_gpu(adev, false);
		amdgpu_virt_fini_data_exchange(adev);
	}

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	/* disable all interrupts */
	amdgpu_irq_disable_all(adev);
	if (adev->mode_info.mode_config_initialized) {
		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
			drm_helper_force_disable_all(adev_to_drm(adev));
		else
			drm_atomic_helper_shutdown(adev_to_drm(adev));
	}
	amdgpu_fence_driver_hw_fini(adev);

	amdgpu_device_sys_interface_fini(adev);

	/* RAS features must be disabled before hw fini */
	amdgpu_ras_pre_fini(adev);

	amdgpu_ttm_set_buffer_funcs_status(adev, false);

	/*
	 * The device went through a surprise hotplug; we need to destroy the
	 * topology before ip_fini_early to prevent kfd locking refcount
	 * issues when amdgpu_amdkfd_suspend() is called.
	 */
	if (pci_dev_is_disconnected(adev->pdev))
		amdgpu_amdkfd_device_fini_sw(adev);

	amdgpu_device_ip_fini_early(adev);

	amdgpu_irq_fini_hw(adev);

	if (adev->mman.initialized)
		ttm_device_clear_dma_mappings(&adev->mman.bdev);

	amdgpu_gart_dummy_page_fini(adev);

	if (pci_dev_is_disconnected(adev->pdev))
		amdgpu_device_unmap_mmio(adev);
}

void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
	int i, idx;
	bool px;

	amdgpu_device_ip_fini(adev);
	amdgpu_fence_driver_sw_fini(adev);
	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
	adev->accel_working = false;
	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
	for (i = 0; i < MAX_XCP; ++i) {
		dma_fence_put(adev->isolation[i].spearhead);
		amdgpu_sync_free(&adev->isolation[i].active);
		amdgpu_sync_free(&adev->isolation[i].prev);
	}

	amdgpu_reset_fini(adev);

	/* free i2c buses */
	amdgpu_i2c_fini(adev);

	if (adev->bios) {
		if (amdgpu_emu_mode != 1)
			amdgpu_atombios_fini(adev);
		amdgpu_bios_release(adev);
	}

	kfree(adev->fru_info);
	adev->fru_info = NULL;

	kfree(adev->xcp_mgr);
	adev->xcp_mgr = NULL;

	px = amdgpu_device_supports_px(adev);

	if (px || (!dev_is_removable(&adev->pdev->dev) &&
		   apple_gmux_detect(NULL, NULL)))
		vga_switcheroo_unregister_client(adev->pdev);

	if (px)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);

	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
		vga_client_unregister(adev->pdev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {

		iounmap(adev->rmmio);
		adev->rmmio = NULL;
		drm_dev_exit(idx);
	}

	if (IS_ENABLED(CONFIG_PERF_EVENTS))
		amdgpu_pmu_fini(adev);
	if (adev->discovery.bin)
		amdgpu_discovery_fini(adev);

	amdgpu_reset_put_reset_domain(adev->reset_domain);
	adev->reset_domain = NULL;

	kfree(adev->pci_state);
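	/* The swds/swus pcistate buffers below appear to hold the cached
	 * config space of the switch downstream/upstream ports above the
	 * GPU, saved so the link can be restored after a PCIe link reset;
	 * release them along with the device's own cached state.
	 */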
4303 kfree(adev->pcie_reset_ctx.swds_pcistate); 4304 kfree(adev->pcie_reset_ctx.swus_pcistate); 4305 } 4306 4307 /** 4308 * amdgpu_device_evict_resources - evict device resources 4309 * @adev: amdgpu device object 4310 * 4311 * Evicts all ttm device resources(vram BOs, gart table) from the lru list 4312 * of the vram memory type. Mainly used for evicting device resources 4313 * at suspend time. 4314 * 4315 */ 4316 static int amdgpu_device_evict_resources(struct amdgpu_device *adev) 4317 { 4318 int ret; 4319 4320 /* No need to evict vram on APUs unless going to S4 */ 4321 if (!adev->in_s4 && (adev->flags & AMD_IS_APU)) 4322 return 0; 4323 4324 /* No need to evict when going to S5 through S4 callbacks */ 4325 if (system_state == SYSTEM_POWER_OFF) 4326 return 0; 4327 4328 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); 4329 if (ret) { 4330 dev_warn(adev->dev, "evicting device resources failed\n"); 4331 return ret; 4332 } 4333 4334 if (adev->in_s4) { 4335 ret = ttm_device_prepare_hibernation(&adev->mman.bdev); 4336 if (ret) 4337 dev_err(adev->dev, "prepare hibernation failed, %d\n", ret); 4338 } 4339 return ret; 4340 } 4341 4342 /* 4343 * Suspend & resume. 4344 */ 4345 /** 4346 * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events 4347 * @nb: notifier block 4348 * @mode: suspend mode 4349 * @data: data 4350 * 4351 * This function is called when the system is about to suspend or hibernate. 4352 * It is used to set the appropriate flags so that eviction can be optimized 4353 * in the pm prepare callback. 4354 */ 4355 static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode, 4356 void *data) 4357 { 4358 struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb); 4359 4360 switch (mode) { 4361 case PM_HIBERNATION_PREPARE: 4362 adev->in_s4 = true; 4363 break; 4364 case PM_POST_HIBERNATION: 4365 adev->in_s4 = false; 4366 break; 4367 } 4368 4369 return NOTIFY_DONE; 4370 } 4371 4372 /** 4373 * amdgpu_device_prepare - prepare for device suspend 4374 * 4375 * @dev: drm dev pointer 4376 * 4377 * Prepare to put the hw in the suspend state (all asics). 4378 * Returns 0 for success or an error on failure. 4379 * Called at driver suspend. 4380 */ 4381 int amdgpu_device_prepare(struct drm_device *dev) 4382 { 4383 struct amdgpu_device *adev = drm_to_adev(dev); 4384 int i, r; 4385 4386 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 4387 return 0; 4388 4389 /* Evict the majority of BOs before starting suspend sequence */ 4390 r = amdgpu_device_evict_resources(adev); 4391 if (r) 4392 return r; 4393 4394 flush_delayed_work(&adev->gfx.gfx_off_delay_work); 4395 4396 for (i = 0; i < adev->num_ip_blocks; i++) { 4397 if (!adev->ip_blocks[i].status.valid) 4398 continue; 4399 if (!adev->ip_blocks[i].version->funcs->prepare_suspend) 4400 continue; 4401 r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]); 4402 if (r) 4403 return r; 4404 } 4405 4406 return 0; 4407 } 4408 4409 /** 4410 * amdgpu_device_complete - complete power state transition 4411 * 4412 * @dev: drm dev pointer 4413 * 4414 * Undo the changes from amdgpu_device_prepare. This will be 4415 * called on all resume transitions, including those that failed. 
4416 */ 4417 void amdgpu_device_complete(struct drm_device *dev) 4418 { 4419 struct amdgpu_device *adev = drm_to_adev(dev); 4420 int i; 4421 4422 for (i = 0; i < adev->num_ip_blocks; i++) { 4423 if (!adev->ip_blocks[i].status.valid) 4424 continue; 4425 if (!adev->ip_blocks[i].version->funcs->complete) 4426 continue; 4427 adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]); 4428 } 4429 } 4430 4431 /** 4432 * amdgpu_device_suspend - initiate device suspend 4433 * 4434 * @dev: drm dev pointer 4435 * @notify_clients: notify in-kernel DRM clients 4436 * 4437 * Puts the hw in the suspend state (all asics). 4438 * Returns 0 for success or an error on failure. 4439 * Called at driver suspend. 4440 */ 4441 int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) 4442 { 4443 struct amdgpu_device *adev = drm_to_adev(dev); 4444 int r, rec; 4445 4446 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 4447 return 0; 4448 4449 adev->in_suspend = true; 4450 4451 if (amdgpu_sriov_vf(adev)) { 4452 if (!adev->in_runpm) 4453 amdgpu_amdkfd_suspend_process(adev); 4454 amdgpu_virt_fini_data_exchange(adev); 4455 r = amdgpu_virt_request_full_gpu(adev, false); 4456 if (r) 4457 return r; 4458 } 4459 4460 r = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3); 4461 if (r) 4462 goto unwind_sriov; 4463 4464 if (notify_clients) 4465 drm_client_dev_suspend(adev_to_drm(adev)); 4466 4467 cancel_delayed_work_sync(&adev->delayed_init_work); 4468 4469 amdgpu_ras_suspend(adev); 4470 4471 r = amdgpu_device_ip_suspend_phase1(adev); 4472 if (r) 4473 goto unwind_smartshift; 4474 4475 amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); 4476 r = amdgpu_userq_suspend(adev); 4477 if (r) 4478 goto unwind_ip_phase1; 4479 4480 r = amdgpu_device_evict_resources(adev); 4481 if (r) 4482 goto unwind_userq; 4483 4484 amdgpu_ttm_set_buffer_funcs_status(adev, false); 4485 4486 amdgpu_fence_driver_hw_fini(adev); 4487 4488 r = amdgpu_device_ip_suspend_phase2(adev); 4489 if (r) 4490 goto unwind_evict; 4491 4492 if (amdgpu_sriov_vf(adev)) 4493 amdgpu_virt_release_full_gpu(adev, false); 4494 4495 return 0; 4496 4497 unwind_evict: 4498 amdgpu_ttm_set_buffer_funcs_status(adev, true); 4499 amdgpu_fence_driver_hw_init(adev); 4500 4501 unwind_userq: 4502 rec = amdgpu_userq_resume(adev); 4503 if (rec) { 4504 dev_warn(adev->dev, "failed to re-initialize user queues: %d\n", rec); 4505 return r; 4506 } 4507 rec = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); 4508 if (rec) { 4509 dev_warn(adev->dev, "failed to re-initialize kfd: %d\n", rec); 4510 return r; 4511 } 4512 4513 unwind_ip_phase1: 4514 /* suspend phase 1 = resume phase 3 */ 4515 rec = amdgpu_device_ip_resume_phase3(adev); 4516 if (rec) { 4517 dev_warn(adev->dev, "failed to re-initialize IPs phase1: %d\n", rec); 4518 return r; 4519 } 4520 4521 unwind_smartshift: 4522 rec = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0); 4523 if (rec) { 4524 dev_warn(adev->dev, "failed to re-update smart shift: %d\n", rec); 4525 return r; 4526 } 4527 4528 if (notify_clients) 4529 drm_client_dev_resume(adev_to_drm(adev)); 4530 4531 amdgpu_ras_resume(adev); 4532 4533 unwind_sriov: 4534 if (amdgpu_sriov_vf(adev)) { 4535 rec = amdgpu_virt_request_full_gpu(adev, true); 4536 if (rec) { 4537 dev_warn(adev->dev, "failed to reinitialize sriov: %d\n", rec); 4538 return r; 4539 } 4540 } 4541 4542 adev->in_suspend = adev->in_s0ix = adev->in_s3 = false; 4543 4544 return r; 4545 } 4546 4547 static inline int amdgpu_virt_resume(struct amdgpu_device 
*adev) 4548 { 4549 int r; 4550 unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id; 4551 4552 /* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO) 4553 * may not work. The access could be blocked by nBIF protection as VF isn't in 4554 * exclusive access mode. Exclusive access is enabled now, disable/enable MSIX 4555 * so that QEMU reprograms MSIX table. 4556 */ 4557 amdgpu_restore_msix(adev); 4558 4559 r = adev->gfxhub.funcs->get_xgmi_info(adev); 4560 if (r) 4561 return r; 4562 4563 dev_info(adev->dev, "xgmi node, old id %d, new id %d\n", 4564 prev_physical_node_id, adev->gmc.xgmi.physical_node_id); 4565 4566 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); 4567 adev->vm_manager.vram_base_offset += 4568 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; 4569 4570 return 0; 4571 } 4572 4573 /** 4574 * amdgpu_device_resume - initiate device resume 4575 * 4576 * @dev: drm dev pointer 4577 * @notify_clients: notify in-kernel DRM clients 4578 * 4579 * Bring the hw back to operating state (all asics). 4580 * Returns 0 for success or an error on failure. 4581 * Called at driver resume. 4582 */ 4583 int amdgpu_device_resume(struct drm_device *dev, bool notify_clients) 4584 { 4585 struct amdgpu_device *adev = drm_to_adev(dev); 4586 int r = 0; 4587 4588 if (amdgpu_sriov_vf(adev)) { 4589 r = amdgpu_virt_request_full_gpu(adev, true); 4590 if (r) 4591 return r; 4592 } 4593 4594 if (amdgpu_virt_xgmi_migrate_enabled(adev)) { 4595 r = amdgpu_virt_resume(adev); 4596 if (r) 4597 goto exit; 4598 } 4599 4600 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 4601 return 0; 4602 4603 if (adev->in_s0ix) 4604 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry); 4605 4606 /* post card */ 4607 if (amdgpu_device_need_post(adev)) { 4608 r = amdgpu_device_asic_init(adev); 4609 if (r) 4610 dev_err(adev->dev, "amdgpu asic init failed\n"); 4611 } 4612 4613 r = amdgpu_device_ip_resume(adev); 4614 4615 if (r) { 4616 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); 4617 goto exit; 4618 } 4619 4620 r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); 4621 if (r) 4622 goto exit; 4623 4624 r = amdgpu_userq_resume(adev); 4625 if (r) 4626 goto exit; 4627 4628 r = amdgpu_device_ip_late_init(adev); 4629 if (r) 4630 goto exit; 4631 4632 queue_delayed_work(system_wq, &adev->delayed_init_work, 4633 msecs_to_jiffies(AMDGPU_RESUME_MS)); 4634 exit: 4635 if (amdgpu_sriov_vf(adev)) { 4636 amdgpu_virt_init_data_exchange(adev); 4637 amdgpu_virt_release_full_gpu(adev, true); 4638 4639 if (!r && !adev->in_runpm) 4640 r = amdgpu_amdkfd_resume_process(adev); 4641 } 4642 4643 if (r) 4644 return r; 4645 4646 /* Make sure IB tests flushed */ 4647 flush_delayed_work(&adev->delayed_init_work); 4648 4649 if (notify_clients) 4650 drm_client_dev_resume(adev_to_drm(adev)); 4651 4652 amdgpu_ras_resume(adev); 4653 4654 if (adev->mode_info.num_crtc) { 4655 /* 4656 * Most of the connector probing functions try to acquire runtime pm 4657 * refs to ensure that the GPU is powered on when connector polling is 4658 * performed. Since we're calling this from a runtime PM callback, 4659 * trying to acquire rpm refs will cause us to deadlock. 4660 * 4661 * Since we're guaranteed to be holding the rpm lock, it's safe to 4662 * temporarily disable the rpm helpers so this doesn't deadlock us. 
4663 */ 4664 #ifdef CONFIG_PM 4665 dev->dev->power.disable_depth++; 4666 #endif 4667 if (!adev->dc_enabled) 4668 drm_helper_hpd_irq_event(dev); 4669 else 4670 drm_kms_helper_hotplug_event(dev); 4671 #ifdef CONFIG_PM 4672 dev->dev->power.disable_depth--; 4673 #endif 4674 } 4675 4676 amdgpu_vram_mgr_clear_reset_blocks(adev); 4677 adev->in_suspend = false; 4678 4679 if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0)) 4680 dev_warn(adev->dev, "smart shift update failed\n"); 4681 4682 return 0; 4683 } 4684 4685 /** 4686 * amdgpu_device_ip_check_soft_reset - did soft reset succeed 4687 * 4688 * @adev: amdgpu_device pointer 4689 * 4690 * The list of all the hardware IPs that make up the asic is walked and 4691 * the check_soft_reset callbacks are run. check_soft_reset determines 4692 * if the asic is still hung or not. 4693 * Returns true if any of the IPs are still in a hung state, false if not. 4694 */ 4695 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) 4696 { 4697 int i; 4698 bool asic_hang = false; 4699 4700 if (amdgpu_sriov_vf(adev)) 4701 return true; 4702 4703 if (amdgpu_asic_need_full_reset(adev)) 4704 return true; 4705 4706 for (i = 0; i < adev->num_ip_blocks; i++) { 4707 if (!adev->ip_blocks[i].status.valid) 4708 continue; 4709 if (adev->ip_blocks[i].version->funcs->check_soft_reset) 4710 adev->ip_blocks[i].status.hang = 4711 adev->ip_blocks[i].version->funcs->check_soft_reset( 4712 &adev->ip_blocks[i]); 4713 if (adev->ip_blocks[i].status.hang) { 4714 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); 4715 asic_hang = true; 4716 } 4717 } 4718 return asic_hang; 4719 } 4720 4721 /** 4722 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset 4723 * 4724 * @adev: amdgpu_device pointer 4725 * 4726 * The list of all the hardware IPs that make up the asic is walked and the 4727 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset 4728 * handles any IP specific hardware or software state changes that are 4729 * necessary for a soft reset to succeed. 4730 * Returns 0 on success, negative error code on failure. 4731 */ 4732 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) 4733 { 4734 int i, r = 0; 4735 4736 for (i = 0; i < adev->num_ip_blocks; i++) { 4737 if (!adev->ip_blocks[i].status.valid) 4738 continue; 4739 if (adev->ip_blocks[i].status.hang && 4740 adev->ip_blocks[i].version->funcs->pre_soft_reset) { 4741 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]); 4742 if (r) 4743 return r; 4744 } 4745 } 4746 4747 return 0; 4748 } 4749 4750 /** 4751 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed 4752 * 4753 * @adev: amdgpu_device pointer 4754 * 4755 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu 4756 * reset is necessary to recover. 4757 * Returns true if a full asic reset is required, false if not. 
4758 */ 4759 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) 4760 { 4761 int i; 4762 4763 if (amdgpu_asic_need_full_reset(adev)) 4764 return true; 4765 4766 for (i = 0; i < adev->num_ip_blocks; i++) { 4767 if (!adev->ip_blocks[i].status.valid) 4768 continue; 4769 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || 4770 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || 4771 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || 4772 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || 4773 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 4774 if (adev->ip_blocks[i].status.hang) { 4775 dev_info(adev->dev, "Some block need full reset!\n"); 4776 return true; 4777 } 4778 } 4779 } 4780 return false; 4781 } 4782 4783 /** 4784 * amdgpu_device_ip_soft_reset - do a soft reset 4785 * 4786 * @adev: amdgpu_device pointer 4787 * 4788 * The list of all the hardware IPs that make up the asic is walked and the 4789 * soft_reset callbacks are run if the block is hung. soft_reset handles any 4790 * IP specific hardware or software state changes that are necessary to soft 4791 * reset the IP. 4792 * Returns 0 on success, negative error code on failure. 4793 */ 4794 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) 4795 { 4796 int i, r = 0; 4797 4798 for (i = 0; i < adev->num_ip_blocks; i++) { 4799 if (!adev->ip_blocks[i].status.valid) 4800 continue; 4801 if (adev->ip_blocks[i].status.hang && 4802 adev->ip_blocks[i].version->funcs->soft_reset) { 4803 r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]); 4804 if (r) 4805 return r; 4806 } 4807 } 4808 4809 return 0; 4810 } 4811 4812 /** 4813 * amdgpu_device_ip_post_soft_reset - clean up from soft reset 4814 * 4815 * @adev: amdgpu_device pointer 4816 * 4817 * The list of all the hardware IPs that make up the asic is walked and the 4818 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset 4819 * handles any IP specific hardware or software state changes that are 4820 * necessary after the IP has been soft reset. 4821 * Returns 0 on success, negative error code on failure. 
4822 */ 4823 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) 4824 { 4825 int i, r = 0; 4826 4827 for (i = 0; i < adev->num_ip_blocks; i++) { 4828 if (!adev->ip_blocks[i].status.valid) 4829 continue; 4830 if (adev->ip_blocks[i].status.hang && 4831 adev->ip_blocks[i].version->funcs->post_soft_reset) 4832 r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]); 4833 if (r) 4834 return r; 4835 } 4836 4837 return 0; 4838 } 4839 4840 /** 4841 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf 4842 * 4843 * @adev: amdgpu_device pointer 4844 * @reset_context: amdgpu reset context pointer 4845 * 4846 * do VF FLR and reinitialize Asic 4847 * return 0 means succeeded otherwise failed 4848 */ 4849 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, 4850 struct amdgpu_reset_context *reset_context) 4851 { 4852 int r; 4853 struct amdgpu_hive_info *hive = NULL; 4854 4855 if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) { 4856 if (!amdgpu_ras_get_fed_status(adev)) 4857 amdgpu_virt_ready_to_reset(adev); 4858 amdgpu_virt_wait_reset(adev); 4859 clear_bit(AMDGPU_HOST_FLR, &reset_context->flags); 4860 r = amdgpu_virt_request_full_gpu(adev, true); 4861 } else { 4862 r = amdgpu_virt_reset_gpu(adev); 4863 } 4864 if (r) 4865 return r; 4866 4867 amdgpu_ras_clear_err_state(adev); 4868 amdgpu_irq_gpu_reset_resume_helper(adev); 4869 4870 /* some sw clean up VF needs to do before recover */ 4871 amdgpu_virt_post_reset(adev); 4872 4873 /* Resume IP prior to SMC */ 4874 r = amdgpu_device_ip_reinit_early_sriov(adev); 4875 if (r) 4876 return r; 4877 4878 amdgpu_virt_init_data_exchange(adev); 4879 4880 r = amdgpu_device_fw_loading(adev); 4881 if (r) 4882 return r; 4883 4884 /* now we are okay to resume SMC/CP/SDMA */ 4885 r = amdgpu_device_ip_reinit_late_sriov(adev); 4886 if (r) 4887 return r; 4888 4889 hive = amdgpu_get_xgmi_hive(adev); 4890 /* Update PSP FW topology after reset */ 4891 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) 4892 r = amdgpu_xgmi_update_topology(hive, adev); 4893 if (hive) 4894 amdgpu_put_xgmi_hive(hive); 4895 if (r) 4896 return r; 4897 4898 r = amdgpu_ib_ring_tests(adev); 4899 if (r) 4900 return r; 4901 4902 if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) 4903 amdgpu_inc_vram_lost(adev); 4904 4905 /* need to be called during full access so we can't do it later like 4906 * bare-metal does. 4907 */ 4908 amdgpu_amdkfd_post_reset(adev); 4909 amdgpu_virt_release_full_gpu(adev, true); 4910 4911 /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */ 4912 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || 4913 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || 4914 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) || 4915 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) || 4916 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) 4917 amdgpu_ras_resume(adev); 4918 4919 amdgpu_virt_ras_telemetry_post_reset(adev); 4920 4921 return 0; 4922 } 4923 4924 /** 4925 * amdgpu_device_has_job_running - check if there is any unfinished job 4926 * 4927 * @adev: amdgpu_device pointer 4928 * 4929 * check if there is any job running on the device when guest driver receives 4930 * FLR notification from host driver. If there are still jobs running, then 4931 * the guest driver will not respond the FLR reset. Instead, let the job hit 4932 * the timeout and guest driver then issue the reset request. 
4933 */ 4934 bool amdgpu_device_has_job_running(struct amdgpu_device *adev) 4935 { 4936 int i; 4937 4938 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 4939 struct amdgpu_ring *ring = adev->rings[i]; 4940 4941 if (!amdgpu_ring_sched_ready(ring)) 4942 continue; 4943 4944 if (amdgpu_fence_count_emitted(ring)) 4945 return true; 4946 } 4947 return false; 4948 } 4949 4950 /** 4951 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery 4952 * 4953 * @adev: amdgpu_device pointer 4954 * 4955 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover 4956 * a hung GPU. 4957 */ 4958 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) 4959 { 4960 4961 if (amdgpu_gpu_recovery == 0) 4962 goto disabled; 4963 4964 /* Skip soft reset check in fatal error mode */ 4965 if (!amdgpu_ras_is_poison_mode_supported(adev)) 4966 return true; 4967 4968 if (amdgpu_sriov_vf(adev)) 4969 return true; 4970 4971 if (amdgpu_gpu_recovery == -1) { 4972 switch (adev->asic_type) { 4973 #ifdef CONFIG_DRM_AMDGPU_SI 4974 case CHIP_VERDE: 4975 case CHIP_TAHITI: 4976 case CHIP_PITCAIRN: 4977 case CHIP_OLAND: 4978 case CHIP_HAINAN: 4979 #endif 4980 #ifdef CONFIG_DRM_AMDGPU_CIK 4981 case CHIP_KAVERI: 4982 case CHIP_KABINI: 4983 case CHIP_MULLINS: 4984 #endif 4985 case CHIP_CARRIZO: 4986 case CHIP_STONEY: 4987 case CHIP_CYAN_SKILLFISH: 4988 goto disabled; 4989 default: 4990 break; 4991 } 4992 } 4993 4994 return true; 4995 4996 disabled: 4997 dev_info(adev->dev, "GPU recovery disabled.\n"); 4998 return false; 4999 } 5000 5001 int amdgpu_device_mode1_reset(struct amdgpu_device *adev) 5002 { 5003 u32 i; 5004 int ret = 0; 5005 5006 if (adev->bios) 5007 amdgpu_atombios_scratch_regs_engine_hung(adev, true); 5008 5009 dev_info(adev->dev, "GPU mode1 reset\n"); 5010 5011 /* Cache the state before bus master disable. The saved config space 5012 * values are used in other cases like restore after mode-2 reset. 
5013 */ 5014 amdgpu_device_cache_pci_state(adev->pdev); 5015 5016 /* disable BM */ 5017 pci_clear_master(adev->pdev); 5018 5019 if (amdgpu_dpm_is_mode1_reset_supported(adev)) { 5020 dev_info(adev->dev, "GPU smu mode1 reset\n"); 5021 ret = amdgpu_dpm_mode1_reset(adev); 5022 } else { 5023 dev_info(adev->dev, "GPU psp mode1 reset\n"); 5024 ret = psp_gpu_reset(adev); 5025 } 5026 5027 if (ret) 5028 goto mode1_reset_failed; 5029 5030 /* enable mmio access after mode 1 reset completed */ 5031 adev->no_hw_access = false; 5032 5033 /* ensure no_hw_access is updated before we access hw */ 5034 smp_mb(); 5035 5036 amdgpu_device_load_pci_state(adev->pdev); 5037 ret = amdgpu_psp_wait_for_bootloader(adev); 5038 if (ret) 5039 goto mode1_reset_failed; 5040 5041 /* wait for asic to come out of reset */ 5042 for (i = 0; i < adev->usec_timeout; i++) { 5043 u32 memsize = adev->nbio.funcs->get_memsize(adev); 5044 5045 if (memsize != 0xffffffff) 5046 break; 5047 udelay(1); 5048 } 5049 5050 if (i >= adev->usec_timeout) { 5051 ret = -ETIMEDOUT; 5052 goto mode1_reset_failed; 5053 } 5054 5055 if (adev->bios) 5056 amdgpu_atombios_scratch_regs_engine_hung(adev, false); 5057 5058 return 0; 5059 5060 mode1_reset_failed: 5061 dev_err(adev->dev, "GPU mode1 reset failed\n"); 5062 return ret; 5063 } 5064 5065 int amdgpu_device_link_reset(struct amdgpu_device *adev) 5066 { 5067 int ret = 0; 5068 5069 dev_info(adev->dev, "GPU link reset\n"); 5070 5071 if (!amdgpu_reset_in_dpc(adev)) 5072 ret = amdgpu_dpm_link_reset(adev); 5073 5074 if (ret) 5075 goto link_reset_failed; 5076 5077 ret = amdgpu_psp_wait_for_bootloader(adev); 5078 if (ret) 5079 goto link_reset_failed; 5080 5081 return 0; 5082 5083 link_reset_failed: 5084 dev_err(adev->dev, "GPU link reset failed\n"); 5085 return ret; 5086 } 5087 5088 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, 5089 struct amdgpu_reset_context *reset_context) 5090 { 5091 int i, r = 0; 5092 struct amdgpu_job *job = NULL; 5093 struct amdgpu_device *tmp_adev = reset_context->reset_req_dev; 5094 bool need_full_reset = 5095 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5096 5097 if (reset_context->reset_req_dev == adev) 5098 job = reset_context->job; 5099 5100 if (amdgpu_sriov_vf(adev)) 5101 amdgpu_virt_pre_reset(adev); 5102 5103 amdgpu_fence_driver_isr_toggle(adev, true); 5104 5105 /* block all schedulers and reset given job's ring */ 5106 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5107 struct amdgpu_ring *ring = adev->rings[i]; 5108 5109 if (!amdgpu_ring_sched_ready(ring)) 5110 continue; 5111 5112 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ 5113 amdgpu_fence_driver_force_completion(ring); 5114 } 5115 5116 amdgpu_fence_driver_isr_toggle(adev, false); 5117 5118 if (job && job->vm) 5119 drm_sched_increase_karma(&job->base); 5120 5121 r = amdgpu_reset_prepare_hwcontext(adev, reset_context); 5122 /* If reset handler not implemented, continue; otherwise return */ 5123 if (r == -EOPNOTSUPP) 5124 r = 0; 5125 else 5126 return r; 5127 5128 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */ 5129 if (!amdgpu_sriov_vf(adev)) { 5130 5131 if (!need_full_reset) 5132 need_full_reset = amdgpu_device_ip_need_full_reset(adev); 5133 5134 if (!need_full_reset && amdgpu_gpu_recovery && 5135 amdgpu_device_ip_check_soft_reset(adev)) { 5136 amdgpu_device_ip_pre_soft_reset(adev); 5137 r = amdgpu_device_ip_soft_reset(adev); 5138 amdgpu_device_ip_post_soft_reset(adev); 5139 if (r || amdgpu_device_ip_check_soft_reset(adev)) { 5140 
dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n"); 5141 need_full_reset = true; 5142 } 5143 } 5144 5145 if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) { 5146 dev_info(tmp_adev->dev, "Dumping IP State\n"); 5147 /* Trigger ip dump before we reset the asic */ 5148 for (i = 0; i < tmp_adev->num_ip_blocks; i++) 5149 if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state) 5150 tmp_adev->ip_blocks[i].version->funcs 5151 ->dump_ip_state((void *)&tmp_adev->ip_blocks[i]); 5152 dev_info(tmp_adev->dev, "Dumping IP State Completed\n"); 5153 } 5154 5155 if (need_full_reset) 5156 r = amdgpu_device_ip_suspend(adev); 5157 if (need_full_reset) 5158 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5159 else 5160 clear_bit(AMDGPU_NEED_FULL_RESET, 5161 &reset_context->flags); 5162 } 5163 5164 return r; 5165 } 5166 5167 int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context) 5168 { 5169 struct list_head *device_list_handle; 5170 bool full_reset, vram_lost = false; 5171 struct amdgpu_device *tmp_adev; 5172 int r, init_level; 5173 5174 device_list_handle = reset_context->reset_device_list; 5175 5176 if (!device_list_handle) 5177 return -EINVAL; 5178 5179 full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5180 5181 /** 5182 * If it's reset on init, it's default init level, otherwise keep level 5183 * as recovery level. 5184 */ 5185 if (reset_context->method == AMD_RESET_METHOD_ON_INIT) 5186 init_level = AMDGPU_INIT_LEVEL_DEFAULT; 5187 else 5188 init_level = AMDGPU_INIT_LEVEL_RESET_RECOVERY; 5189 5190 r = 0; 5191 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5192 amdgpu_set_init_level(tmp_adev, init_level); 5193 if (full_reset) { 5194 /* post card */ 5195 amdgpu_reset_set_dpc_status(tmp_adev, false); 5196 amdgpu_ras_clear_err_state(tmp_adev); 5197 r = amdgpu_device_asic_init(tmp_adev); 5198 if (r) { 5199 dev_warn(tmp_adev->dev, "asic atom init failed!"); 5200 } else { 5201 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); 5202 5203 r = amdgpu_device_ip_resume_phase1(tmp_adev); 5204 if (r) 5205 goto out; 5206 5207 vram_lost = amdgpu_device_check_vram_lost(tmp_adev); 5208 5209 if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) 5210 amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job); 5211 5212 if (vram_lost) { 5213 dev_info( 5214 tmp_adev->dev, 5215 "VRAM is lost due to GPU reset!\n"); 5216 amdgpu_inc_vram_lost(tmp_adev); 5217 } 5218 5219 r = amdgpu_device_fw_loading(tmp_adev); 5220 if (r) 5221 return r; 5222 5223 r = amdgpu_xcp_restore_partition_mode( 5224 tmp_adev->xcp_mgr); 5225 if (r) 5226 goto out; 5227 5228 r = amdgpu_device_ip_resume_phase2(tmp_adev); 5229 if (r) 5230 goto out; 5231 5232 amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true); 5233 5234 r = amdgpu_device_ip_resume_phase3(tmp_adev); 5235 if (r) 5236 goto out; 5237 5238 if (vram_lost) 5239 amdgpu_device_fill_reset_magic(tmp_adev); 5240 5241 /* 5242 * Add this ASIC as tracked as reset was already 5243 * complete successfully. 
				 */
				amdgpu_register_gpu_instance(tmp_adev);

				if (!reset_context->hive &&
				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					amdgpu_xgmi_add_device(tmp_adev);

				r = amdgpu_device_ip_late_init(tmp_adev);
				if (r)
					goto out;

				r = amdgpu_userq_post_reset(tmp_adev, vram_lost);
				if (r)
					goto out;

				drm_client_dev_resume(adev_to_drm(tmp_adev));

				/*
				 * The GPU enters a bad state once the number
				 * of faulty pages detected by ECC reaches the
				 * threshold, and RAS recovery is scheduled
				 * next. So add one check here to break
				 * recovery if the bad page threshold has
				 * indeed been exceeded, and remind the user to
				 * retire this GPU or set a bigger
				 * bad_page_threshold value when probing the
				 * driver again.
				 */
				if (!amdgpu_ras_is_rma(tmp_adev)) {
					/* must succeed. */
					amdgpu_ras_resume(tmp_adev);
				} else {
					r = -EINVAL;
					goto out;
				}

				/* Update PSP FW topology after reset */
				if (reset_context->hive &&
				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					r = amdgpu_xgmi_update_topology(
						reset_context->hive, tmp_adev);
			}
		}

out:
		if (!r) {
			/* IP init is complete now, set level as default */
			amdgpu_set_init_level(tmp_adev,
					      AMDGPU_INIT_LEVEL_DEFAULT);
			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
			r = amdgpu_ib_ring_tests(tmp_adev);
			if (r) {
				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
				r = -EAGAIN;
				goto end;
			}
		}

		if (r)
			tmp_adev->asic_reset_res = r;
	}

end:
	return r;
}

int amdgpu_do_asic_reset(struct list_head *device_list_handle,
			 struct amdgpu_reset_context *reset_context)
{
	struct amdgpu_device *tmp_adev = NULL;
	bool need_full_reset, skip_hw_reset;
	int r = 0;

	/* Try reset handler method first */
	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
				    reset_list);

	reset_context->reset_device_list = device_list_handle;
	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
	/* If reset handler not implemented, continue; otherwise return */
	if (r == -EOPNOTSUPP)
		r = 0;
	else
		return r;

	/* Reset handler not implemented, use the default method */
	need_full_reset =
		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);

	/*
	 * ASIC reset has to be done on all XGMI hive nodes ASAP
	 * to allow proper link negotiation in FW (within 1 sec)
	 */
	if (!skip_hw_reset && need_full_reset) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
			/* For XGMI run all resets in parallel to speed up the process */
			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
				if (!queue_work(system_unbound_wq,
						&tmp_adev->xgmi_reset_work))
					r = -EALREADY;
			} else
				r = amdgpu_asic_reset(tmp_adev);

			if (r) {
				dev_err(tmp_adev->dev,
					"ASIC reset failed with error, %d for drm dev, %s",
					r, adev_to_drm(tmp_adev)->unique);
				goto out;
			}
		}

		/* For XGMI wait for all resets to complete before proceed */
		if (!r) {
			list_for_each_entry(tmp_adev, device_list_handle,
					    reset_list) {
				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
					flush_work(&tmp_adev->xgmi_reset_work);
					r = tmp_adev->asic_reset_res;
					if (r)
						break;
				}
			}
		}
	}
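	/*
	 * If a fatal RAS interrupt triggered this reset, the MMHUB error
	 * counters are cleared on every node before the interrupt is marked
	 * as handled: after a successful ASIC reset the counts presumably no
	 * longer reflect live errors.
	 */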
	if (!r && amdgpu_ras_intr_triggered()) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
			amdgpu_ras_reset_error_count(tmp_adev,
						     AMDGPU_RAS_BLOCK__MMHUB);
		}

		amdgpu_ras_intr_cleared();
	}

	r = amdgpu_device_reinit_after_reset(reset_context);
	if (r == -EAGAIN)
		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	else
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);

out:
	return r;
}

static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
{

	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_MODE1:
	case AMD_RESET_METHOD_LINK:
		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
		break;
	case AMD_RESET_METHOD_MODE2:
		adev->mp1_state = PP_MP1_STATE_RESET;
		break;
	default:
		adev->mp1_state = PP_MP1_STATE_NONE;
		break;
	}
}

static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
{
	amdgpu_vf_error_trans_all(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
}

static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (p) {
		pm_runtime_enable(&(p->dev));
		pm_runtime_resume(&(p->dev));
	}

	pci_dev_put(p);
}

static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
{
	enum amd_reset_method reset_method;
	struct pci_dev *p = NULL;
	u64 expires;

	/*
	 * For now, only BACO and mode1 reset are confirmed to suffer from
	 * the audio issue when the audio device is not properly suspended.
	 */
	reset_method = amdgpu_asic_reset_method(adev);
	if ((reset_method != AMD_RESET_METHOD_BACO) &&
	    (reset_method != AMD_RESET_METHOD_MODE1))
		return -EINVAL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return -ENODEV;

	expires = pm_runtime_autosuspend_expiration(&(p->dev));
	if (!expires)
		/*
		 * If we cannot get the audio device autosuspend delay,
		 * a fixed 4s interval will be used. Since 3s is the audio
		 * controller's default autosuspend delay setting, 4s is
		 * guaranteed to cover it.
		 */
		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
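	/*
	 * Poll until the audio function actually reports the suspended
	 * state: pm_runtime_suspend() can fail transiently (e.g. while the
	 * device is still in use), so keep retrying until it succeeds or
	 * the deadline computed above expires.
	 */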
	while (!pm_runtime_status_suspended(&(p->dev))) {
		if (!pm_runtime_suspend(&(p->dev)))
			break;

		if (expires < ktime_get_mono_fast_ns()) {
			dev_warn(adev->dev, "failed to suspend display audio\n");
			pci_dev_put(p);
			/* TODO: abort the succeeding gpu reset? */
			return -ETIMEDOUT;
		}
	}

	pm_runtime_disable(&(p->dev));

	pci_dev_put(p);
	return 0;
}

static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

#if defined(CONFIG_DEBUG_FS)
	if (!amdgpu_sriov_vf(adev))
		cancel_work(&adev->reset_work);
#endif
	cancel_work(&adev->userq_reset_work);

	if (adev->kfd.dev)
		cancel_work(&adev->kfd.reset_work);

	if (amdgpu_sriov_vf(adev))
		cancel_work(&adev->virt.flr_work);

	if (con && adev->ras_enabled)
		cancel_work(&con->recovery_work);
}

static int amdgpu_device_health_check(struct list_head *device_list_handle)
{
	struct amdgpu_device *tmp_adev;
	int ret = 0;

	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		ret |= amdgpu_device_bus_status_check(tmp_adev);
	}

	return ret;
}

static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
					   struct list_head *device_list,
					   struct amdgpu_hive_info *hive)
{
	struct amdgpu_device *tmp_adev = NULL;

	/*
	 * Build the list of devices to reset.
	 * In case we are in XGMI hive mode, resort the device list
	 * to put adev in the 1st position.
	 */
	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			list_add_tail(&tmp_adev->reset_list, device_list);
			if (adev->shutdown)
				tmp_adev->shutdown = true;
			if (amdgpu_reset_in_dpc(adev))
				tmp_adev->pcie_reset_ctx.in_link_reset = true;
		}
		if (!list_is_first(&adev->reset_list, device_list))
			list_rotate_to_front(&adev->reset_list, device_list);
	} else {
		list_add_tail(&adev->reset_list, device_list);
	}
}

static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
						  struct list_head *device_list)
{
	struct amdgpu_device *tmp_adev = NULL;

	if (list_empty(device_list))
		return;
	tmp_adev =
		list_first_entry(device_list, struct amdgpu_device, reset_list);
	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
}

static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
						  struct list_head *device_list)
{
	struct amdgpu_device *tmp_adev = NULL;

	if (list_empty(device_list))
		return;
	tmp_adev =
		list_first_entry(device_list, struct amdgpu_device, reset_list);
	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
}

static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
					  struct amdgpu_job *job,
					  struct amdgpu_reset_context *reset_context,
					  struct list_head *device_list,
					  struct amdgpu_hive_info *hive,
					  bool need_emergency_restart)
{
	struct amdgpu_device *tmp_adev = NULL;
	int i;

	/* block all schedulers and reset given job's ring */
	list_for_each_entry(tmp_adev, device_list, reset_list) {
		amdgpu_device_set_mp1_state(tmp_adev);

		/*
		 * Try to put the audio codec into suspend state
		 * before gpu reset started.
		 *
		 * Because the power domain of the graphics device is
		 * shared with the AZ power domain, we may otherwise
		 * change the audio hardware behind the audio driver's
		 * back, which triggers audio codec errors.
5579 */ 5580 if (!amdgpu_device_suspend_display_audio(tmp_adev)) 5581 tmp_adev->pcie_reset_ctx.audio_suspended = true; 5582 5583 amdgpu_ras_set_error_query_ready(tmp_adev, false); 5584 5585 cancel_delayed_work_sync(&tmp_adev->delayed_init_work); 5586 5587 amdgpu_amdkfd_pre_reset(tmp_adev, reset_context); 5588 5589 /* 5590 * Mark these ASICs to be reset as untracked first 5591 * And add them back after reset completed 5592 */ 5593 amdgpu_unregister_gpu_instance(tmp_adev); 5594 5595 drm_client_dev_suspend(adev_to_drm(tmp_adev)); 5596 5597 /* disable ras on ALL IPs */ 5598 if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) && 5599 amdgpu_device_ip_need_full_reset(tmp_adev)) 5600 amdgpu_ras_suspend(tmp_adev); 5601 5602 amdgpu_userq_pre_reset(tmp_adev); 5603 5604 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5605 struct amdgpu_ring *ring = tmp_adev->rings[i]; 5606 5607 if (!amdgpu_ring_sched_ready(ring)) 5608 continue; 5609 5610 drm_sched_wqueue_stop(&ring->sched); 5611 5612 if (need_emergency_restart) 5613 amdgpu_job_stop_all_jobs_on_sched(&ring->sched); 5614 } 5615 atomic_inc(&tmp_adev->gpu_reset_counter); 5616 } 5617 } 5618 5619 static int amdgpu_device_asic_reset(struct amdgpu_device *adev, 5620 struct list_head *device_list, 5621 struct amdgpu_reset_context *reset_context) 5622 { 5623 struct amdgpu_device *tmp_adev = NULL; 5624 int retry_limit = AMDGPU_MAX_RETRY_LIMIT; 5625 int r = 0; 5626 5627 retry: /* Rest of adevs pre asic reset from XGMI hive. */ 5628 list_for_each_entry(tmp_adev, device_list, reset_list) { 5629 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context); 5630 /*TODO Should we stop ?*/ 5631 if (r) { 5632 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", 5633 r, adev_to_drm(tmp_adev)->unique); 5634 tmp_adev->asic_reset_res = r; 5635 } 5636 } 5637 5638 /* Actual ASIC resets if needed.*/ 5639 /* Host driver will handle XGMI hive reset for SRIOV */ 5640 if (amdgpu_sriov_vf(adev)) { 5641 5642 /* Bail out of reset early */ 5643 if (amdgpu_ras_is_rma(adev)) 5644 return -ENODEV; 5645 5646 if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) { 5647 dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n"); 5648 amdgpu_ras_set_fed(adev, true); 5649 set_bit(AMDGPU_HOST_FLR, &reset_context->flags); 5650 } 5651 5652 r = amdgpu_device_reset_sriov(adev, reset_context); 5653 if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) { 5654 amdgpu_virt_release_full_gpu(adev, true); 5655 goto retry; 5656 } 5657 if (r) 5658 adev->asic_reset_res = r; 5659 } else { 5660 r = amdgpu_do_asic_reset(device_list, reset_context); 5661 if (r && r == -EAGAIN) 5662 goto retry; 5663 } 5664 5665 list_for_each_entry(tmp_adev, device_list, reset_list) { 5666 /* 5667 * Drop any pending non scheduler resets queued before reset is done. 5668 * Any reset scheduled after this point would be valid. Scheduler resets 5669 * were already dropped during drm_sched_stop and no new ones can come 5670 * in before drm_sched_start. 
		amdgpu_device_stop_pending_resets(tmp_adev);
	}

	return r;
}

static int amdgpu_device_sched_resume(struct list_head *device_list,
				      struct amdgpu_reset_context *reset_context,
				      bool job_signaled)
{
	struct amdgpu_device *tmp_adev = NULL;
	int i, r = 0;

	/* Post ASIC reset for all devs. */
	list_for_each_entry(tmp_adev, device_list, reset_list) {

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!amdgpu_ring_sched_ready(ring))
				continue;

			drm_sched_wqueue_start(&ring->sched);
		}

		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));

		if (tmp_adev->asic_reset_res) {
			/* Bad news: how do we tell userspace?
			 * For a RAS error we should report the GPU's bad
			 * status instead of a reset failure.
			 */
			if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
			    !amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
				dev_info(tmp_adev->dev,
					 "GPU reset(%d) failed with error %d\n",
					 atomic_read(&tmp_adev->gpu_reset_counter),
					 tmp_adev->asic_reset_res);
			amdgpu_vf_error_put(tmp_adev,
					    AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0,
					    tmp_adev->asic_reset_res);
			if (!r)
				r = tmp_adev->asic_reset_res;
			tmp_adev->asic_reset_res = 0;
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
				 atomic_read(&tmp_adev->gpu_reset_counter));
			if (amdgpu_acpi_smart_shift_update(tmp_adev,
							   AMDGPU_SS_DEV_D0))
				dev_warn(tmp_adev->dev,
					 "smart shift update failed\n");
		}
	}

	return r;
}

static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
				     struct list_head *device_list,
				     bool need_emergency_restart)
{
	struct amdgpu_device *tmp_adev = NULL;

	list_for_each_entry(tmp_adev, device_list, reset_list) {
		/* unlock kfd: SRIOV would do it separately */
		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);

		/* kfd_post_reset will do nothing if the kfd device is not
		 * initialized, so bring up kfd here if it was not
		 * initialized before.
		 */
		if (!adev->kfd.init_complete)
			amdgpu_amdkfd_device_init(adev);

		if (tmp_adev->pcie_reset_ctx.audio_suspended)
			amdgpu_device_resume_display_audio(tmp_adev);

		amdgpu_device_unset_mp1_state(tmp_adev);

		amdgpu_ras_set_error_query_ready(tmp_adev, true);
	}
}


/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu_device pointer
 * @job: the job which triggered the hang
 * @reset_context: amdgpu reset context pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempts a soft reset or full reset and reinitializes the ASIC.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job,
			      struct amdgpu_reset_context *reset_context)
{
	struct list_head device_list;
	bool job_signaled = false;
	struct amdgpu_hive_info *hive = NULL;
	int r = 0;
	bool need_emergency_restart = false;
	/* save the pasid here as the job may be freed before the end of the reset */
	int pasid = job ?
/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu_device pointer
 * @job: which job triggered the hang
 * @reset_context: amdgpu reset context pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do soft-reset or full-reset and reinitialize the ASIC.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job,
			      struct amdgpu_reset_context *reset_context)
{
	struct list_head device_list;
	bool job_signaled = false;
	struct amdgpu_hive_info *hive = NULL;
	int r = 0;
	bool need_emergency_restart = false;
	/* save the pasid here as the job may be freed before the end of the reset */
	int pasid = job ? job->pasid : -EINVAL;

	/*
	 * If it reaches here because of hang/timeout and a RAS error is
	 * detected at the same time, let RAS recovery take care of it.
	 */
	if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
	    !amdgpu_sriov_vf(adev) &&
	    reset_context->src != AMDGPU_RESET_SRC_RAS) {
		dev_dbg(adev->dev,
			"GPU recovery from source: %d yielding to RAS error recovery handling",
			reset_context->src);
		return 0;
	}

	/*
	 * Special case: RAS triggered and full reset isn't supported
	 */
	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);

	/*
	 * Flush RAM to disk so that after reboot
	 * the user can read the log and see why the system rebooted.
	 */
	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
	    amdgpu_ras_get_context(adev)->reboot) {
		dev_warn(adev->dev, "Emergency reboot.");

		ksys_sync_helper();
		emergency_restart();
	}

	dev_info(adev->dev, "GPU %s begin! Source: %d\n",
		 need_emergency_restart ? "jobs stop" : "reset",
		 reset_context->src);

	if (!amdgpu_sriov_vf(adev))
		hive = amdgpu_get_xgmi_hive(adev);
	if (hive)
		mutex_lock(&hive->hive_lock);

	reset_context->job = job;
	reset_context->hive = hive;
	INIT_LIST_HEAD(&device_list);

	amdgpu_device_recovery_prepare(adev, &device_list, hive);

	if (!amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_health_check(&device_list);
		if (r)
			goto end_reset;
	}

	/* Cannot be called after locking reset domain */
	amdgpu_ras_pre_reset(adev, &device_list);

	/* We need to lock reset domain only once both for XGMI and single device */
	amdgpu_device_recovery_get_reset_lock(adev, &device_list);

	amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
				      hive, need_emergency_restart);
	if (need_emergency_restart)
		goto skip_sched_resume;
	/*
	 * Must check guilty signal here since after this point all old
	 * HW fences are force signaled.
	 *
	 * job->base holds a reference to parent fence
	 */
	if (job && (dma_fence_get_status(&job->hw_fence->base) > 0)) {
		job_signaled = true;
		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
		goto skip_hw_reset;
	}

	r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
	if (r)
		goto reset_unlock;
skip_hw_reset:
	r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
	if (r)
		goto reset_unlock;
skip_sched_resume:
	amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
reset_unlock:
	amdgpu_device_recovery_put_reset_lock(adev, &device_list);
	amdgpu_ras_post_reset(adev, &device_list);
end_reset:
	if (hive) {
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}

	if (r)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);

	atomic_set(&adev->reset_domain->reset_res, r);

	if (!r) {
		struct amdgpu_task_info *ti = NULL;

		/*
		 * The job may already be freed at this point via the sched tdr
		 * workqueue, so use the cached pasid.
		 */
		if (pasid >= 0)
			ti = amdgpu_vm_get_task_info_pasid(adev, pasid);

		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
				     ti ? &ti->task : NULL);

		amdgpu_vm_put_task_info(ti);
	}

	return r;
}
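/*
 * Illustrative only: the typical caller is a job timeout handler. A sketch
 * under the assumption that the caller fills in a job-type reset source
 * (the real entry points live in the job and reset code, not here):
 *
 *	struct amdgpu_reset_context ctx;
 *
 *	memset(&ctx, 0, sizeof(ctx));
 *	ctx.method = AMD_RESET_METHOD_NONE;
 *	ctx.reset_req_dev = adev;
 *	ctx.src = AMDGPU_RESET_SRC_JOB;
 *	set_bit(AMDGPU_NEED_FULL_RESET, &ctx.flags);
 *	r = amdgpu_device_gpu_recover(adev, job, &ctx);
 */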
/**
 * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner
 *
 * @adev: amdgpu_device pointer
 * @speed: pointer to the speed of the link
 * @width: pointer to the width of the link
 *
 * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
 * first physical partner to an AMD dGPU.
 * This will exclude any virtual switches and links.
 */
static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
					    enum pci_bus_speed *speed,
					    enum pcie_link_width *width)
{
	struct pci_dev *parent = adev->pdev;

	if (!speed || !width)
		return;

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
		while ((parent = pci_upstream_bridge(parent))) {
			/* skip upstream/downstream switches internal to dGPU */
			if (parent->vendor == PCI_VENDOR_ID_ATI)
				continue;
			*speed = pcie_get_speed_cap(parent);
			*width = pcie_get_width_cap(parent);
			break;
		}
	} else {
		/* use the current speeds rather than max if switching is not supported */
		pcie_bandwidth_available(adev->pdev, NULL, speed, width);
	}
}

/**
 * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
 *
 * @adev: amdgpu_device pointer
 * @speed: pointer to the speed of the link
 * @width: pointer to the width of the link
 *
 * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
 * AMD dGPU which may be a virtual upstream bridge.
 */
static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev,
					enum pci_bus_speed *speed,
					enum pcie_link_width *width)
{
	struct pci_dev *parent = adev->pdev;

	if (!speed || !width)
		return;

	parent = pci_upstream_bridge(parent);
	if (parent && parent->vendor == PCI_VENDOR_ID_ATI) {
		/* use the upstream/downstream switches internal to dGPU */
		*speed = pcie_get_speed_cap(parent);
		*width = pcie_get_width_cap(parent);
		while ((parent = pci_upstream_bridge(parent))) {
			if (parent->vendor == PCI_VENDOR_ID_ATI) {
				/* use the upstream/downstream switches internal to dGPU */
				*speed = pcie_get_speed_cap(parent);
				*width = pcie_get_width_cap(parent);
			}
		}
	} else {
		/* use the device itself */
		*speed = pcie_get_speed_cap(adev->pdev);
		*width = pcie_get_width_cap(adev->pdev);
	}
}
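/*
 * Example topology (illustrative) for a dGPU with an internal switch:
 *
 *	root port (e.g. Gen3 x8)	<- partner_bandwidth() stops here
 *	  +- SWUS (ATI vendor ID, skipped as internal)
 *	       +- SWDS
 *	            +- dGPU endpoint	<- gpu_bandwidth() walks up from here
 *
 * partner_bandwidth() reports the first non-ATI upstream partner, while
 * gpu_bandwidth() reports the topmost ATI-internal bridge, or the device
 * itself when there is no internal switch.
 */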
/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width, link_width;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
					&platform_link_width);
	amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
	}
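	/*
	 * Worked example (illustrative): a Gen4-capable ASIC in a Gen3 slot
	 * resolves speed_cap to PCIE_SPEED_16_0GT and platform_speed_cap to
	 * PCIE_SPEED_8_0GT, so pcie_gen_mask ends up with the ASIC GEN1-4
	 * bits plus the platform GEN1-3 bits; the effective link speed is
	 * then limited by the platform side.
	 */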
	if (adev->pm.pcie_mlw_mask == 0) {
		/* asic caps */
		if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK;
		} else {
			switch (link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
		/* platform caps */
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}
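/*
 * Note: both masks can be overridden from the command line; see the
 * amdgpu_pcie_gen_cap and amdgpu_pcie_lane_cap checks at the top of
 * amdgpu_device_get_pcie_info(). E.g. booting with
 * amdgpu.pcie_gen_cap=<CAIL mask> bypasses the probing entirely; the
 * value layout follows the CAIL_*_LINK_SPEED_SUPPORT defines in
 * amd_pcie.h.
 */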
/**
 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
 *
 * @adev: amdgpu_device pointer
 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
 *
 * Return true if @peer_adev can access (DMA) @adev through the PCIe
 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
 * @peer_adev.
 */
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
				      struct amdgpu_device *peer_adev)
{
#ifdef CONFIG_HSA_AMD_P2P
	bool p2p_access =
		!adev->gmc.xgmi.connected_to_cpu &&
		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
	if (!p2p_access)
		dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n",
			 pci_name(peer_adev->pdev));

	bool is_large_bar = adev->gmc.visible_vram_size &&
			    adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
	bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev);

	if (!p2p_addressable) {
		uint64_t address_mask = peer_adev->dev->dma_mask ?
			~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
		resource_size_t aper_limit =
			adev->gmc.aper_base + adev->gmc.aper_size - 1;

		p2p_addressable = !(adev->gmc.aper_base & address_mask ||
				    aper_limit & address_mask);
	}
	return pcie_p2p && is_large_bar && p2p_access && p2p_addressable;
#else
	return false;
#endif
}

int amdgpu_device_baco_enter(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(adev))
		return -ENOTSUPP;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}

int amdgpu_device_baco_exit(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(adev))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
	    adev->nbio.funcs->clear_doorbell_interrupt)
		adev->nbio.funcs->clear_doorbell_interrupt(adev);

	return 0;
}
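/*
 * Callers are expected to pair these helpers. A rough runtime-PM style
 * sequence (sketch, error handling elided):
 *
 *	r = amdgpu_device_baco_enter(adev);
 *	if (r)
 *		return r;
 *	... device sits in BACO (bus active, chip off) ...
 *	r = amdgpu_device_baco_exit(adev);
 */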
/**
 * amdgpu_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
		amdgpu_get_xgmi_hive(adev);
	struct amdgpu_reset_context reset_context;
	struct list_head device_list;

	dev_info(adev->dev, "PCI error: detected callback!\n");

	adev->pci_channel_state = state;

	switch (state) {
	case pci_channel_io_normal:
		dev_info(adev->dev, "pci_channel_io_normal: state(%d)!\n", state);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!\n", state);
		if (hive) {
			/* Hive devices are expected to support FW based link
			 * reset of the other devices; if they don't, bail out.
			 */
			if (!amdgpu_dpm_is_link_reset_supported(adev)) {
				dev_warn(adev->dev,
					 "No support for XGMI hive yet...\n");
				return PCI_ERS_RESULT_DISCONNECT;
			}
			/* Set dpc status only if device is part of hive.
			 * Non-hive devices should be able to recover after
			 * link reset.
			 */
			amdgpu_reset_set_dpc_status(adev, true);

			mutex_lock(&hive->hive_lock);
		}
		memset(&reset_context, 0, sizeof(reset_context));
		INIT_LIST_HEAD(&device_list);

		amdgpu_device_recovery_prepare(adev, &device_list, hive);
		amdgpu_device_recovery_get_reset_lock(adev, &device_list);
		amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
					      hive, false);
		if (hive)
			mutex_unlock(&hive->hive_lock);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!\n", state);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);

	dev_info(adev->dev, "PCI error: mmio enabled callback!\n");

	/* TODO - dump whatever for debugging purposes */

	/* This is called only if amdgpu_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset slot.
	 */

	return PCI_ERS_RESULT_RECOVERED;
}
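/*
 * Recovery flow (per the kernel's PCI error recovery model, see
 * Documentation/PCI/pci-error-recovery.rst): the core calls
 * .error_detected() first. For pci_channel_io_frozen it then resets the
 * slot and calls .slot_reset() below, and finally .resume(). For
 * pci_channel_io_normal, .mmio_enabled() above is called instead and the
 * device recovers without a slot reset.
 */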
/**
 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_reset_context reset_context;
	struct amdgpu_device *tmp_adev;
	struct amdgpu_hive_info *hive;
	struct list_head device_list;
	struct pci_dev *link_dev;
	int r = 0, i, timeout;
	u32 memsize;
	u16 status;

	dev_info(adev->dev, "PCI error: slot reset callback!\n");

	memset(&reset_context, 0, sizeof(reset_context));
	INIT_LIST_HEAD(&device_list);
	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		mutex_lock(&hive->hive_lock);
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			list_add_tail(&tmp_adev->reset_list, &device_list);
	} else {
		list_add_tail(&adev->reset_list, &device_list);
	}

	if (adev->pcie_reset_ctx.swus)
		link_dev = adev->pcie_reset_ctx.swus;
	else
		link_dev = adev->pdev;
	/* wait for asic to come out of reset, timeout = 10s */
	timeout = 10000;
	do {
		usleep_range(10000, 10500);
		r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
		timeout -= 10;
	} while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
		 (status != PCI_VENDOR_ID_AMD));

	if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
		r = -ETIME;
		goto out;
	}

	amdgpu_device_load_switch_state(adev);
	/* Restore PCI confspace */
	amdgpu_device_load_pci_state(pdev);

	/* confirm ASIC came out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		memsize = amdgpu_asic_get_config_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
	if (memsize == 0xffffffff) {
		r = -ETIME;
		goto out;
	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);

	if (hive) {
		reset_context.hive = hive;
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			tmp_adev->pcie_reset_ctx.in_link_reset = true;
	} else {
		set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
	}

	r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
out:
	if (!r) {
		if (amdgpu_device_cache_pci_state(adev->pdev))
			pci_restore_state(adev->pdev);
		dev_info(adev->dev, "PCIe error recovery succeeded\n");
	} else {
		dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r);
		if (hive) {
			list_for_each_entry(tmp_adev, &device_list, reset_list)
				amdgpu_device_unset_mp1_state(tmp_adev);
		}
		amdgpu_device_recovery_put_reset_lock(adev, &device_list);
	}

	if (hive) {
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}

	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
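/*
 * These callbacks are tied together through a struct pci_error_handlers
 * instance registered with the PCI driver. A sketch of the wiring,
 * assuming the upstream layout in amdgpu_drv.c:
 *
 *	static struct pci_error_handlers amdgpu_pci_err_handler = {
 *		.error_detected	= amdgpu_pci_error_detected,
 *		.mmio_enabled	= amdgpu_pci_mmio_enabled,
 *		.slot_reset	= amdgpu_pci_slot_reset,
 *		.resume		= amdgpu_pci_resume,
 *	};
 */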
/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it's
 * OK to resume normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct list_head device_list;
	struct amdgpu_hive_info *hive = NULL;
	struct amdgpu_device *tmp_adev = NULL;

	dev_info(adev->dev, "PCI error: resume callback!\n");

	/* Only continue execution for the case of pci_channel_io_frozen */
	if (adev->pci_channel_state != pci_channel_io_frozen)
		return;

	INIT_LIST_HEAD(&device_list);

	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		mutex_lock(&hive->hive_lock);
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			tmp_adev->pcie_reset_ctx.in_link_reset = false;
			list_add_tail(&tmp_adev->reset_list, &device_list);
		}
	} else {
		list_add_tail(&adev->reset_list, &device_list);
	}

	amdgpu_device_sched_resume(&device_list, NULL, false);
	amdgpu_device_gpu_resume(adev, &device_list, false);
	amdgpu_device_recovery_put_reset_lock(adev, &device_list);

	if (hive) {
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}
}

static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
{
	struct pci_dev *swus, *swds;
	int r;

	swds = pci_upstream_bridge(adev->pdev);
	if (!swds || swds->vendor != PCI_VENDOR_ID_ATI ||
	    pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM)
		return;
	swus = pci_upstream_bridge(swds);
	if (!swus ||
	    (swus->vendor != PCI_VENDOR_ID_ATI &&
	     swus->vendor != PCI_VENDOR_ID_AMD) ||
	    pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)
		return;

	/* If already saved, return */
	if (adev->pcie_reset_ctx.swus)
		return;
	/* Upstream bridge is ATI, assume it's SWUS/DS architecture */
	r = pci_save_state(swds);
	if (r)
		return;
	adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);

	r = pci_save_state(swus);
	if (r)
		return;
	adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);

	adev->pcie_reset_ctx.swus = swus;
}

static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	int r;

	if (!adev->pcie_reset_ctx.swds_pcistate ||
	    !adev->pcie_reset_ctx.swus_pcistate)
		return;

	pdev = adev->pcie_reset_ctx.swus;
	r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
	if (!r) {
		pci_restore_state(pdev);
	} else {
		dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
		return;
	}

	pdev = pci_upstream_bridge(adev->pdev);
	r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
	if (!r)
		pci_restore_state(pdev);
	else
		dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
}
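/*
 * The switch-state helpers mirror the per-GPU PCI state handling below:
 * amdgpu_device_cache_switch_state() snapshots the internal SWUS/SWDS
 * bridge config space when the GPU's PCI state is cached, and
 * amdgpu_device_load_switch_state() restores it during slot reset before
 * the GPU's own config space is restored (see amdgpu_pci_slot_reset()
 * above).
 */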
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (amdgpu_sriov_vf(adev))
		return false;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);

		adev->pci_state = pci_store_saved_state(pdev);

		if (!adev->pci_state) {
			dev_err(adev->dev, "Failed to store PCI saved state");
			return false;
		}
	} else {
		dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
		return false;
	}

	amdgpu_device_cache_switch_state(adev);

	return true;
}

bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);

	if (!r) {
		pci_restore_state(pdev);
	} else {
		dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	if (ring && ring->funcs->emit_hdp_flush) {
		amdgpu_ring_emit_hdp_flush(ring);
		return;
	}

	if (!ring && amdgpu_sriov_runtime(adev)) {
		if (!amdgpu_kiq_hdp_flush(adev))
			return;
	}

	amdgpu_hdp_flush(adev, ring);
}

void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	amdgpu_hdp_invalidate(adev, ring);
}

int amdgpu_in_reset(struct amdgpu_device *adev)
{
	return atomic_read(&adev->reset_domain->in_gpu_reset);
}

/**
 * amdgpu_device_halt() - bring hardware to some kind of halt state
 *
 * @adev: amdgpu_device pointer
 *
 * Bring hardware to some kind of halt state so that no one can touch it
 * any more. It will help to maintain error context when an error occurs.
 * Compared to a simple hang, the system will stay stable at least for SSH
 * access. Then it should be trivial to inspect the hardware state and
 * see what's going on. Implemented as follows:
 *
 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc),
 *    clears all CPU mappings to the device, disallows remappings through page faults
 * 2. amdgpu_irq_disable_all() disables all interrupts
 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
 * 4. set adev->no_hw_access to avoid potential crashes after step 5
 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
 *    flush any in flight DMA operations
 */
void amdgpu_device_halt(struct amdgpu_device *adev)
{
	struct pci_dev *pdev = adev->pdev;
	struct drm_device *ddev = adev_to_drm(adev);

	amdgpu_xcp_dev_unplug(adev);
	drm_dev_unplug(ddev);

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	amdgpu_irq_disable_all(adev);

	amdgpu_fence_driver_hw_fini(adev);

	adev->no_hw_access = true;

	amdgpu_device_unmap_mmio(adev);

	pci_disable_device(pdev);
	pci_wait_for_pending_transaction(pdev);
}

/**
 * amdgpu_device_get_gang - return a reference to the current gang
 * @adev: amdgpu_device pointer
 *
 * Returns: A new reference to the current gang leader.
 */
struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&adev->gang_submit);
	rcu_read_unlock();
	return fence;
}

/**
 * amdgpu_device_switch_gang - switch to a new gang
 * @adev: amdgpu_device pointer
 * @gang: the gang to switch to
 *
 * Try to switch to a new gang.
 * Returns: NULL if we switched to the new gang or a reference to the current
 * gang leader.
 */
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
					    struct dma_fence *gang)
{
	struct dma_fence *old = NULL;

	dma_fence_get(gang);
	do {
		dma_fence_put(old);
		old = amdgpu_device_get_gang(adev);
		if (old == gang)
			break;

		if (!dma_fence_is_signaled(old)) {
			dma_fence_put(gang);
			return old;
		}

	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
			 old, gang) != old);

	/*
	 * Drop it once for the exchanged reference in adev and once for the
	 * thread local reference acquired in amdgpu_device_get_gang().
	 */
	dma_fence_put(old);
	dma_fence_put(old);
	return NULL;
}
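/*
 * Typical usage (sketch): a submitter trying to install a new gang leader
 * treats a non-NULL return value as the dependency that must signal before
 * retrying, e.g. from a scheduler dependency callback:
 *
 *	struct dma_fence *dep;
 *
 *	dep = amdgpu_device_switch_gang(adev, job->gang_submit);
 *	if (dep)
 *		return dep;	// wait for the old gang, then try again
 */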
/**
 * amdgpu_device_enforce_isolation - enforce HW isolation
 * @adev: the amdgpu device pointer
 * @ring: the HW ring the job is supposed to run on
 * @job: the job which is about to be pushed to the HW ring
 *
 * Makes sure that only one client at a time can use the GFX block.
 * Returns: The dependency to wait on before the job can be pushed to the HW.
 * The function is called multiple times until NULL is returned.
 */
struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
						  struct amdgpu_ring *ring,
						  struct amdgpu_job *job)
{
	struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
	struct drm_sched_fence *f = job->base.s_fence;
	struct dma_fence *dep;
	void *owner;
	int r;

	/*
	 * For now enforce isolation only for the GFX block since we only need
	 * the cleaner shader on those rings.
	 */
	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX &&
	    ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
		return NULL;

	/*
	 * All submissions where enforce isolation is false are handled as if
	 * they come from a single client. Use ~0l as the owner to distinguish
	 * it from kernel submissions where the owner is NULL.
	 */
	owner = job->enforce_isolation ? f->owner : (void *)~0l;

	mutex_lock(&adev->enforce_isolation_mutex);

	/*
	 * The "spearhead" submission is the first one which changes the
	 * ownership to its client. We always need to wait for it to be
	 * pushed to the HW before proceeding with anything.
	 */
	if (&f->scheduled != isolation->spearhead &&
	    !dma_fence_is_signaled(isolation->spearhead)) {
		dep = isolation->spearhead;
		goto out_grab_ref;
	}

	if (isolation->owner != owner) {

		/*
		 * Wait for any gang to be assembled before switching to a
		 * different owner, otherwise we could deadlock the
		 * submissions.
		 */
		if (!job->gang_submit) {
			dep = amdgpu_device_get_gang(adev);
			if (!dma_fence_is_signaled(dep))
				goto out_return_dep;
			dma_fence_put(dep);
		}

		dma_fence_put(isolation->spearhead);
		isolation->spearhead = dma_fence_get(&f->scheduled);
		amdgpu_sync_move(&isolation->active, &isolation->prev);
		trace_amdgpu_isolation(isolation->owner, owner);
		isolation->owner = owner;
	}

	/*
	 * Specifying the ring here helps to pipeline submissions even when
	 * isolation is enabled. If that is not desired for testing, NULL can
	 * be used instead of the ring to enforce a CPU round trip while
	 * switching between clients.
	 */
	dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
	r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
	if (r)
		dev_warn(adev->dev, "OOM tracking isolation\n");

out_grab_ref:
	dma_fence_get(dep);
out_return_dep:
	mutex_unlock(&adev->enforce_isolation_mutex);
	return dep;
}
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
	case CHIP_TOPAZ:
		/* chips with no display hardware */
		return false;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* chips with display hardware */
		return true;
	default:
		/* IP discovery */
		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
			return false;
		return true;
	}
}

ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring)
{
	ssize_t size = 0;

	if (!ring || !ring->adev)
		return size;

	if (amdgpu_device_should_recover_gpu(ring->adev))
		size |= AMDGPU_RESET_TYPE_FULL;

	if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
	    !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)
		size |= AMDGPU_RESET_TYPE_SOFT_RESET;

	return size;
}

ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
{
	ssize_t size = 0;

	if (supported_reset == 0) {
		size += sysfs_emit_at(buf, size, "unsupported");
		size += sysfs_emit_at(buf, size, "\n");
		return size;
	}

	if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET)
		size += sysfs_emit_at(buf, size, "soft ");

	if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
		size += sysfs_emit_at(buf, size, "queue ");

	if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
		size += sysfs_emit_at(buf, size, "pipe ");

	if (supported_reset & AMDGPU_RESET_TYPE_FULL)
		size += sysfs_emit_at(buf, size, "full ");

	size += sysfs_emit_at(buf, size, "\n");
	return size;
}
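/*
 * Example: a ring whose per-queue and per-pipe resets are supported in
 * addition to full adapter reset would report, through the corresponding
 * sysfs file, the space-separated list:
 *
 *	queue pipe full
 */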
		dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
			     type);
		return;
	}

	if (inst >= AMDGPU_UID_INST_MAX) {
		dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
			     inst);
		return;
	}

	if (uid_info->uid[type][inst] != 0) {
		dev_warn_once(
			uid_info->adev->dev,
			"Overwriting existing UID %llu for type %d instance %d\n",
			uid_info->uid[type][inst], type, inst);
	}

	uid_info->uid[type][inst] = uid;
}

u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
			  enum amdgpu_uid_type type, uint8_t inst)
{
	if (!uid_info)
		return 0;

	if (type >= AMDGPU_UID_TYPE_MAX) {
		dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
			     type);
		return 0;
	}

	if (inst >= AMDGPU_UID_INST_MAX) {
		dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
			     inst);
		return 0;
	}

	return uid_info->uid[type][inst];
}
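/*
 * Usage sketch (illustrative; the uid_info pointer location and enum
 * values are assumptions, not defined in this file): an IP block records
 * its unique ID at init time and consumers read it back later:
 *
 *	amdgpu_device_set_uid(uid_info, type, inst, uid);
 *	...
 *	u64 uid = amdgpu_device_get_uid(uid_info, type, inst);
 *
 * A zero return from the getter means no UID was recorded for that
 * type/instance pair.
 */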