/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
#include "amdgpu_virt.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#endif

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)

static const struct drm_driver amdgpu_kms_driver;

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"CYAN_SKILLFISH",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"BEIGE_GOBY",
	"YELLOW_CARP",
	"IP DISCOVERY",
	"LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and the NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, 0444,
		   amdgpu_device_get_pcie_replay_count, NULL);
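
/*
 * The read-only attributes defined in this file show up under the PCI
 * device's sysfs directory. A minimal userspace sketch for reading the
 * replay counter; the card index, and therefore the exact path, is an
 * assumption:
 *
 *	char buf[32];
 *	int fd = open("/sys/class/drm/card0/device/pcie_replay_count", O_RDONLY);
 *	ssize_t n = (fd >= 0) ? read(fd, buf, sizeof(buf) - 1) : -1;
 *
 *	if (n > 0) {
 *		buf[n] = '\0';
 *		printf("PCIe replays: %s", buf);
 *	}
 */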
"HAWAII", 112 "MULLINS", 113 "TOPAZ", 114 "TONGA", 115 "FIJI", 116 "CARRIZO", 117 "STONEY", 118 "POLARIS10", 119 "POLARIS11", 120 "POLARIS12", 121 "VEGAM", 122 "VEGA10", 123 "VEGA12", 124 "VEGA20", 125 "RAVEN", 126 "ARCTURUS", 127 "RENOIR", 128 "ALDEBARAN", 129 "NAVI10", 130 "CYAN_SKILLFISH", 131 "NAVI14", 132 "NAVI12", 133 "SIENNA_CICHLID", 134 "NAVY_FLOUNDER", 135 "VANGOGH", 136 "DIMGREY_CAVEFISH", 137 "BEIGE_GOBY", 138 "YELLOW_CARP", 139 "IP DISCOVERY", 140 "LAST", 141 }; 142 143 /** 144 * DOC: pcie_replay_count 145 * 146 * The amdgpu driver provides a sysfs API for reporting the total number 147 * of PCIe replays (NAKs) 148 * The file pcie_replay_count is used for this and returns the total 149 * number of replays as a sum of the NAKs generated and NAKs received 150 */ 151 152 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev, 153 struct device_attribute *attr, char *buf) 154 { 155 struct drm_device *ddev = dev_get_drvdata(dev); 156 struct amdgpu_device *adev = drm_to_adev(ddev); 157 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev); 158 159 return sysfs_emit(buf, "%llu\n", cnt); 160 } 161 162 static DEVICE_ATTR(pcie_replay_count, 0444, 163 amdgpu_device_get_pcie_replay_count, NULL); 164 165 /** 166 * DOC: board_info 167 * 168 * The amdgpu driver provides a sysfs API for giving board related information. 169 * It provides the form factor information in the format 170 * 171 * type : form factor 172 * 173 * Possible form factor values 174 * 175 * - "cem" - PCIE CEM card 176 * - "oam" - Open Compute Accelerator Module 177 * - "unknown" - Not known 178 * 179 */ 180 181 static ssize_t amdgpu_device_get_board_info(struct device *dev, 182 struct device_attribute *attr, 183 char *buf) 184 { 185 struct drm_device *ddev = dev_get_drvdata(dev); 186 struct amdgpu_device *adev = drm_to_adev(ddev); 187 enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM; 188 const char *pkg; 189 190 if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type) 191 pkg_type = adev->smuio.funcs->get_pkg_type(adev); 192 193 switch (pkg_type) { 194 case AMDGPU_PKG_TYPE_CEM: 195 pkg = "cem"; 196 break; 197 case AMDGPU_PKG_TYPE_OAM: 198 pkg = "oam"; 199 break; 200 default: 201 pkg = "unknown"; 202 break; 203 } 204 205 return sysfs_emit(buf, "%s : %s\n", "type", pkg); 206 } 207 208 static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL); 209 210 static struct attribute *amdgpu_board_attrs[] = { 211 &dev_attr_board_info.attr, 212 NULL, 213 }; 214 215 static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj, 216 struct attribute *attr, int n) 217 { 218 struct device *dev = kobj_to_dev(kobj); 219 struct drm_device *ddev = dev_get_drvdata(dev); 220 struct amdgpu_device *adev = drm_to_adev(ddev); 221 222 if (adev->flags & AMD_IS_APU) 223 return 0; 224 225 return attr->mode; 226 } 227 228 static const struct attribute_group amdgpu_board_attrs_group = { 229 .attrs = amdgpu_board_attrs, 230 .is_visible = amdgpu_board_attrs_is_visible 231 }; 232 233 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); 234 235 236 /** 237 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control 238 * 239 * @dev: drm_device pointer 240 * 241 * Returns true if the device is a dGPU with ATPX power control, 242 * otherwise return false. 

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}

/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			/* Make sure HDP write cache flush happens without any reordering
			 * after the system memory contents are sent over PCIe device
			 */
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			/* Make sure HDP read cache is invalidated before issuing a read
			 * to the PCIe device
			 */
			mb();
			memcpy_fromio(buf, addr, count);
		}

	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try using the VRAM aperture to access VRAM first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM_INDEX/MM_DATA to access the rest of VRAM */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}
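
/*
 * A minimal usage sketch: read the first four dwords at a VRAM offset into
 * a stack buffer. @pos and @size must stay dword aligned for the
 * MM_INDEX/MM_DATA fallback; the offset here is only an illustration:
 *
 *	uint32_t vals[4];
 *
 *	amdgpu_device_vram_access(adev, 0x1000, vals, sizeof(vals), false);
 */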

/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_domain->sem))
			up_read(&adev->reset_domain->sem);
		else
			lockdep_assert_held(&adev->reset_domain->sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: bytes offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/**
 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
				uint32_t reg, uint32_t acc_flags,
				uint32_t xcc_id)
{
	uint32_t ret, rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, false,
							 &rlcg_flag)) {
			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	return ret;
}

/*
 * MMIO register write with bytes helper functions
 * @offset: bytes offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
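
/*
 * Callers rarely use amdgpu_device_rreg()/amdgpu_device_wreg() directly;
 * they go through the RREG32()/WREG32() convenience macros from amdgpu.h,
 * which expand to these helpers. A minimal sketch (the register offset is
 * an arbitrary placeholder):
 *
 *	uint32_t v = RREG32(0x12);
 *
 *	WREG32(0x12, v | 0x1);
 */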

/**
 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 * @xcc_id: xcc accelerated compute core id
 *
 * This function is only invoked for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v,
			     uint32_t xcc_id)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t v,
			    uint32_t acc_flags, uint32_t xcc_id)
{
	uint32_t rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, true,
							 &rlcg_flag)) {
			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}
}

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u32 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}
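
/*
 * The indirect accessors below all follow the same PCIE_INDEX/PCIE_DATA
 * pattern: write the target address to the index register, read the index
 * register back to make sure the write has landed, then access the data
 * register. ASIC code typically installs them as the adev->pcie_rreg /
 * adev->pcie_wreg callbacks that amdgpu_device_rreg()/_wreg() fall back to
 * for offsets beyond the MMIO aperture.
 */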

u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
				    u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	else
		pcie_index_hi = 0;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
				      u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r |= ((u64)readl(pcie_data_offset) << 32);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
				     u64 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	else
		pcie_index_hi = 0;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
				       u64 reg_addr, u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_get_rev_id - query device rev_id
 *
 * @adev: amdgpu_device pointer
 *
 * Return device rev_id
 */
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	int ret;

	amdgpu_asic_pre_asic_init(adev);

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		amdgpu_psp_wait_for_bootloader(adev);
		ret = amdgpu_atomfirmware_asic_init(adev, true);
		/* TODO: check the return val and stop device initialization if boot fails */
		amdgpu_psp_query_boot_status(adev);
		return ret;
	} else {
		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
	}
}

/**
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &adev->mem_scratch.robj,
				       &adev->mem_scratch.gpu_addr,
				       (void **)&adev->mem_scratch.ptr);
}

/**
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
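
/*
 * A minimal sketch of the golden-register pattern this helper consumes:
 * entries come in {offset, AND mask, OR mask} triples. The offsets and
 * masks below are placeholders, not real golden settings:
 *
 *	static const u32 golden_settings_example[] = {
 *		0x00001234, 0xffffffff, 0x00000001,
 *		0x00005678, 0x0000ff00, 0x00002100,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *						ARRAY_SIZE(golden_settings_example));
 */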

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
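
/*
 * A minimal usage sketch for the writeback API: allocate a slot, derive
 * its GPU and CPU addresses, and free it again. The index returned by
 * amdgpu_device_wb_get() is in dwords:
 *
 *	u32 wb;
 *	u64 wb_gpu_addr;
 *	volatile u32 *wb_cpu_addr;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		wb_gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		wb_cpu_addr = &adev->wb.wb[wb];
 *		// ... point a fence or ring pointer at the slot ...
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */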

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * the driver load by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned int i;
	u16 cmd;
	int r;

	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		return 0;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
{
	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
		return false;

	return true;
}

/*
 * GPU helpers function.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if need or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (!amdgpu_device_read_bios(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old SMC firmware still needs the driver to do a vPost, otherwise
		 * the GPU hangs. SMC firmware versions above 22.15 don't have this flaw,
		 * so we force vPost for SMC versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on DCE 3.0 or later.
 * If users report that it works on older ASICs as well, we may
 * loosen this.
 */
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
{
	switch (amdgpu_seamless) {
	case -1:
		break;
	case 1:
		return true;
	case 0:
		return false;
	default:
		DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
			  amdgpu_seamless);
		return false;
	}

	if (!(adev->flags & AMD_IS_APU))
		return false;

	if (adev->mman.keep_stolen_vga_memory)
		return false;

	return adev->ip_versions[DCE_HWIP][0] >= IP_VERSION(3, 0, 0);
}

/*
 * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
 * don't support dynamic speed switching. Until we have confirmation from Intel
 * that a specific host supports it, it's safer that we keep it disabled for all.
 *
 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
 */
static bool amdgpu_device_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_vendor == X86_VENDOR_INTEL)
		return false;
#endif
	return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
	switch (amdgpu_aspm) {
	case -1:
		break;
	case 0:
		return false;
	case 1:
		return true;
	default:
		return false;
	}
	if (adev->flags & AMD_IS_APU)
		return false;
	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
		return false;
	return pcie_aspm_enabled(adev->pdev);
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
		bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory
	 */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return 0;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if ((adev->pdev->device == 0x13FE) ||
		    (adev->pdev->device == 0x143F))
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		break;
	}

	return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
		amdgpu_reset_method = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_prepare(dev);
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
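
/*
 * A minimal usage sketch: gate clockgating for all GFX IP instances. The
 * IP-block type and state values are real enums; the call site itself is
 * an illustration only:
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 */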
/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}
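/*
 * Example (illustrative sketch): a caller that owns an IP block could
 * combine the helpers above to quiesce and then gate it, e.g.
 *
 *	if (!amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX))
 *		amdgpu_device_ip_set_clockgating_state(adev,
 *						       AMD_IP_BLOCK_TYPE_GFX,
 *						       AMD_CG_STATE_GATE);
 *
 * The GFX block is an arbitrary example here.
 */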
/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	switch (ip_block_version->type) {
	case AMD_IP_BLOCK_TYPE_VCN:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
			return 0;
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
			return 0;
		break;
	default:
		break;
	}

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
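/*
 * Example (illustrative only): amdgpu_device_ip_block_version_cmp()
 * returns 0 for "at least this version", so gating a feature on GFX 9.0+
 * could look like
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						9, 0))
 *		enable_feature(adev);
 *
 * where enable_feature() is a hypothetical placeholder, not a real
 * function in this file.
 */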
/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		const char *pci_address_name = pci_name(adev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
		adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
			 adev->enable_virtual_display, adev->mode_info.num_crtc);
	}
}
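/*
 * Example (illustrative only): with the parsing above, a boot parameter
 * such as
 *
 *	amdgpu.virtual_display=0000:04:00.0,2
 *
 * enables a virtual display with two CRTCs on the device at that PCI
 * address (a placeholder here), while "all,1" enables a single virtual
 * CRTC on every amdgpu device. The CRTC count is clamped to 1..6.
 */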
/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[40];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	if (adev->mman.discovery_bin) {
		/*
		 * FIXME: The bounding box is still needed by Navi12, so
		 * temporarily read it from gpu_info firmware. Should be dropped
		 * when DAL no longer needs it.
		 */
		if (adev->asic_type != CHIP_NAVI12)
			return 0;
	}

	switch (adev->asic_type) {
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			chip_name = "raven2";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
	if (err) {
		dev_err(adev->dev,
			"Failed to get gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		/*
		 * Should be dropped when DAL no longer needs it.
		 */
		if (adev->asic_type == CHIP_NAVI12)
			goto parse_soc_bounding_box;

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		if (hdr->version_minor >= 1) {
			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->gfx.config.num_sc_per_sh =
				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
			adev->gfx.config.num_packer_per_sc =
				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
		}

parse_soc_bounding_box:
		/*
		 * soc bounding box info is not integrated in the discovery table,
		 * we always need to parse it from the gpu info firmware if needed.
		 */
		if (hdr->version_minor == 2) {
			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
		}
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run. This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	struct pci_dev *parent;
	int i, r;
	bool total;

	amdgpu_device_enable_virtual_display(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_KV;
		else
			adev->family = AMDGPU_FAMILY_CI;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		r = amdgpu_discovery_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	}

	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((adev->flags & AMD_IS_APU) == 0) &&
	    !dev_is_removable(&adev->pdev->dev))
		adev->flags |= AMD_IS_PX;

	if (!(adev->flags & AMD_IS_APU)) {
		parent = pcie_find_root_port(adev->pdev);
		adev->has_pr3 = parent ?
			pci_pr3_present(parent) : false;
	}

	adev->pm.pp_feature = amdgpu_pp_feature_mask;
	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
	if (!amdgpu_device_pcie_dynamic_switching_supported())
		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;

	total = true;
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_WARN("disabled ip block: %d <%s>\n",
				 i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					total = false;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
		/* get the vbios after the asic_funcs are set up */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
			r = amdgpu_device_parse_gpu_info_fw(adev);
			if (r)
				return r;

			/* Read BIOS */
			if (amdgpu_device_read_bios(adev)) {
				if (!amdgpu_get_bios(adev))
					return -EINVAL;

				r = amdgpu_atombios_init(adev);
				if (r) {
					dev_err(adev->dev, "amdgpu_atombios_init failed\n");
					amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
					return r;
				}
			}

			/* get pf2vf msg info at its earliest time */
			if (amdgpu_sriov_vf(adev))
				amdgpu_virt_init_data_exchange(adev);

		}
	}
	if (!total)
		return -ENODEV;

	amdgpu_amdkfd_device_probe(adev);
	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
			if (r) {
				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}
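/*
 * Example (illustrative only): the (amdgpu_ip_block_mask & (1 << i)) test
 * in amdgpu_device_ip_early_init() above means a boot parameter such as
 *
 *	amdgpu.ip_block_mask=0xfffffffd
 *
 * clears bit 1 and so disables IP block number 1, as reported by the
 * "add ip block" log lines; which block that is varies per asic.
 */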
static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
{
	int r = 0;
	int i;
	uint32_t smu_version;

	if (adev->asic_type >= CHIP_VEGA10) {
		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
				continue;

			if (!adev->ip_blocks[i].status.sw)
				continue;

			/* no need to do the fw loading again if already done */
			if (adev->ip_blocks[i].status.hw == true)
				break;

			if (amdgpu_in_reset(adev) || adev->in_suspend) {
				r = adev->ip_blocks[i].version->funcs->resume(adev);
				if (r) {
					DRM_ERROR("resume of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			} else {
				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
				if (r) {
					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			}

			adev->ip_blocks[i].status.hw = true;
			break;
		}
	}

	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);

	return r;
}

static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
{
	long timeout;
	int r, i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		/* No need to setup the GPU scheduler for rings that don't need it */
		if (!ring || ring->no_scheduler)
			continue;

		switch (ring->funcs->type) {
		case AMDGPU_RING_TYPE_GFX:
			timeout = adev->gfx_timeout;
			break;
		case AMDGPU_RING_TYPE_COMPUTE:
			timeout = adev->compute_timeout;
			break;
		case AMDGPU_RING_TYPE_SDMA:
			timeout = adev->sdma_timeout;
			break;
		default:
			timeout = adev->video_timeout;
			break;
		}

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   DRM_SCHED_PRIORITY_COUNT,
				   ring->num_hw_submission, 0,
				   timeout, adev->reset_domain->wq,
				   ring->sched_score, ring->name,
				   adev->dev);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
		r = amdgpu_uvd_entity_init(adev, ring);
		if (r) {
			DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
				  ring->name);
			return r;
		}
		r = amdgpu_vce_entity_init(adev, ring);
		if (r) {
			DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	amdgpu_xcp_update_partition_sched_list(adev);

	return 0;
}
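/*
 * Illustrative summary (defaults come from
 * amdgpu_device_get_job_timeout_settings() later in this file): the
 * switch above maps ring types to timeouts roughly as
 *
 *	GFX     -> adev->gfx_timeout     (10 s by default)
 *	COMPUTE -> adev->compute_timeout (60 s by default)
 *	SDMA    -> adev->sdma_timeout    (10 s by default)
 *	others  -> adev->video_timeout   (10 s by default)
 *
 * all of which can be overridden with the lockup_timeout parameter.
 */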
/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run. sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	r = amdgpu_ras_init(adev);
	if (r)
		return r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			goto init_failed;
		}
		adev->ip_blocks[i].status.sw = true;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
			/* need to do common hw init early so everything is set up for gmc */
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				goto init_failed;
			}
			adev->ip_blocks[i].status.hw = true;
		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			/* need to do gmc hw init early so we can allocate gpu mem */
			/* Try to reserve bad pages early */
			if (amdgpu_sriov_vf(adev))
				amdgpu_virt_exchange_data(adev);

			r = amdgpu_device_mem_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
				goto init_failed;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				goto init_failed;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				goto init_failed;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (adev->gfx.mcbp) {
				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
							       AMDGPU_GEM_DOMAIN_VRAM |
							       AMDGPU_GEM_DOMAIN_GTT,
							       AMDGPU_CSA_SIZE);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					goto init_failed;
				}
			}
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_data_exchange(adev);

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto init_failed;
	}

	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase1(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase2(adev);
	if (r)
		goto init_failed;

	/*
	 * retired pages will be loaded from eeprom and reserved here,
	 * it should be called after amdgpu_device_ip_hw_init_phase2 since
	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
	 * functional for I2C communication, which is only true at this point.
	 *
	 * amdgpu_ras_recovery_init may fail, but the caller only cares about
	 * failures caused by a bad GPU state and stops the amdgpu init process
	 * accordingly. For other failure cases it still releases all the
	 * resources and prints an error message, rather than returning a
	 * negative value to the upper level.
2719 * 2720 * Note: theoretically, this should be called before all vram allocations 2721 * to protect retired page from abusing 2722 */ 2723 r = amdgpu_ras_recovery_init(adev); 2724 if (r) 2725 goto init_failed; 2726 2727 /** 2728 * In case of XGMI grab extra reference for reset domain for this device 2729 */ 2730 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2731 if (amdgpu_xgmi_add_device(adev) == 0) { 2732 if (!amdgpu_sriov_vf(adev)) { 2733 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 2734 2735 if (WARN_ON(!hive)) { 2736 r = -ENOENT; 2737 goto init_failed; 2738 } 2739 2740 if (!hive->reset_domain || 2741 !amdgpu_reset_get_reset_domain(hive->reset_domain)) { 2742 r = -ENOENT; 2743 amdgpu_put_xgmi_hive(hive); 2744 goto init_failed; 2745 } 2746 2747 /* Drop the early temporary reset domain we created for device */ 2748 amdgpu_reset_put_reset_domain(adev->reset_domain); 2749 adev->reset_domain = hive->reset_domain; 2750 amdgpu_put_xgmi_hive(hive); 2751 } 2752 } 2753 } 2754 2755 r = amdgpu_device_init_schedulers(adev); 2756 if (r) 2757 goto init_failed; 2758 2759 if (adev->mman.buffer_funcs_ring->sched.ready) 2760 amdgpu_ttm_set_buffer_funcs_status(adev, true); 2761 2762 /* Don't init kfd if whole hive need to be reset during init */ 2763 if (!adev->gmc.xgmi.pending_reset) { 2764 kgd2kfd_init_zone_device(adev); 2765 amdgpu_amdkfd_device_init(adev); 2766 } 2767 2768 amdgpu_fru_get_product_info(adev); 2769 2770 init_failed: 2771 2772 return r; 2773 } 2774 2775 /** 2776 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer 2777 * 2778 * @adev: amdgpu_device pointer 2779 * 2780 * Writes a reset magic value to the gart pointer in VRAM. The driver calls 2781 * this function before a GPU reset. If the value is retained after a 2782 * GPU reset, VRAM has not been lost. Some GPU resets may destry VRAM contents. 2783 */ 2784 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev) 2785 { 2786 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); 2787 } 2788 2789 /** 2790 * amdgpu_device_check_vram_lost - check if vram is valid 2791 * 2792 * @adev: amdgpu_device pointer 2793 * 2794 * Checks the reset magic value written to the gart pointer in VRAM. 2795 * The driver calls this after a GPU reset to see if the contents of 2796 * VRAM is lost or now. 2797 * returns true if vram is lost, false if not. 2798 */ 2799 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) 2800 { 2801 if (memcmp(adev->gart.ptr, adev->reset_magic, 2802 AMDGPU_RESET_MAGIC_NUM)) 2803 return true; 2804 2805 if (!amdgpu_in_reset(adev)) 2806 return false; 2807 2808 /* 2809 * For all ASICs with baco/mode1 reset, the VRAM is 2810 * always assumed to be lost. 2811 */ 2812 switch (amdgpu_asic_reset_method(adev)) { 2813 case AMD_RESET_METHOD_BACO: 2814 case AMD_RESET_METHOD_MODE1: 2815 return true; 2816 default: 2817 return false; 2818 } 2819 } 2820 2821 /** 2822 * amdgpu_device_set_cg_state - set clockgating for amdgpu device 2823 * 2824 * @adev: amdgpu_device pointer 2825 * @state: clockgating state (gate or ungate) 2826 * 2827 * The list of all the hardware IPs that make up the asic is walked and the 2828 * set_clockgating_state callbacks are run. 2829 * Late initialization pass enabling clockgating for hardware IPs. 2830 * Fini or suspend, pass disabling clockgating for hardware IPs. 2831 * Returns 0 on success, negative error code on failure. 
/**
 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
 *
 * @adev: amdgpu_device pointer
 * @state: clockgating state (gate or ungate)
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * set_clockgating_state callbacks are run. In the late init pass this
 * enables clockgating for the hardware IPs; in the fini or suspend pass
 * it disables clockgating.
 * Returns 0 on success, negative error code on failure.
 */

int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
			       enum amd_clockgating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip CG for GFX, SDMA on S0ix */
		if (adev->in_s0ix &&
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
			       enum amd_powergating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip PG for GFX, SDMA on S0ix */
		if (adev->in_s0ix &&
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
			continue;
		/* skip PG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
			/* enable powergating to save power */
			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

static int amdgpu_device_enable_mgpu_fan_boost(void)
{
	struct amdgpu_gpu_instance *gpu_ins;
	struct amdgpu_device *adev;
	int i, ret = 0;

	mutex_lock(&mgpu_info.mutex);

	/*
	 * MGPU fan boost feature should be enabled
	 * only when there are two or more dGPUs in
	 * the system
	 */
	if (mgpu_info.num_dgpu < 2)
		goto out;

	for (i = 0; i < mgpu_info.num_dgpu; i++) {
		gpu_ins = &(mgpu_info.gpu_ins[i]);
		adev = gpu_ins->adev;
		if (!(adev->flags & AMD_IS_APU) &&
		    !gpu_ins->mgpu_fan_enabled) {
			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
			if (ret)
				break;

			gpu_ins->mgpu_fan_enabled = 1;
		}
	}

out:
	mutex_unlock(&mgpu_info.mutex);

	return ret;
}
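/*
 * Illustrative note: the index arithmetic in amdgpu_device_set_cg_state()
 * and amdgpu_device_set_pg_state() above walks the IP list forward when
 * gating and in reverse when ungating; e.g. with three IP blocks and
 * AMD_CG_STATE_UNGATE the visit order is 2, 1, 0.
 */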
/**
 * amdgpu_device_ip_late_init - run late init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Late initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the late_init callbacks are run.
 * late_init covers any special initialization that an IP requires
 * after all of the IPs have been initialized or something that needs to
 * happen late in the init process.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
		adev->ip_blocks[i].status.late_initialized = true;
	}

	r = amdgpu_ras_late_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_ras_late_init failed %d", r);
		return r;
	}

	amdgpu_ras_set_error_query_ready(adev, true);

	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);

	amdgpu_device_fill_reset_magic(adev);

	r = amdgpu_device_enable_mgpu_fan_boost();
	if (r)
		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);

	/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
	if (amdgpu_passthrough(adev) &&
	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
	     adev->asic_type == CHIP_ALDEBARAN))
		amdgpu_dpm_handle_passthrough_sbr(adev, true);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		mutex_lock(&mgpu_info.mutex);

		/*
		 * Reset device p-state to low as this was booted with high.
		 *
		 * This should be performed only after all devices from the same
		 * hive get initialized.
		 *
		 * However, we don't know in advance how many devices the hive
		 * has, as they are counted one by one during device
		 * initialization.
		 *
		 * So, we wait for all XGMI interlinked devices to be
		 * initialized. This may bring some delay as those devices may
		 * come from different hives. But that should be OK.
		 */
		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
			for (i = 0; i < mgpu_info.num_gpu; i++) {
				gpu_instance = &(mgpu_info.gpu_ins[i]);
				if (gpu_instance->adev->flags & AMD_IS_APU)
					continue;

				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
							   AMDGPU_XGMI_PSTATE_MIN);
				if (r) {
					DRM_ERROR("pstate setting failed (%d).\n", r);
					break;
				}
			}
		}

		mutex_unlock(&mgpu_info.mutex);
	}

	return 0;
}

/**
 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
 *
 * @adev: amdgpu_device pointer
 *
 * For ASICs that need to disable the SMC first.
 */
static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
		return;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}
}

static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].version->funcs->early_fini)
			continue;

		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
		if (r) {
			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	amdgpu_amdkfd_suspend(adev, false);

	/* Workaround for ASICs that need to disable the SMC first */
	amdgpu_device_smu_fini_early(adev);

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	if (amdgpu_sriov_vf(adev)) {
		if (amdgpu_virt_release_full_gpu(adev, false))
			DRM_ERROR("failed to release exclusive mode on fini\n");
	}

	return 0;
}

/**
 * amdgpu_device_ip_fini - run fini for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main teardown pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
 * are run. hw_fini tears down the hardware associated with each IP
 * and sw_fini tears down any software state associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
		amdgpu_virt_release_ras_err_handler_data(adev);

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_remove_device(adev);

	amdgpu_amdkfd_device_fini_sw(adev);

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_ucode_free_bo(adev);
			amdgpu_free_static_csa(&adev->virt.csa_obj);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_mem_scratch_fini(adev);
			amdgpu_ib_pool_fini(adev);
		}

		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	amdgpu_ras_fini(adev);

	return 0;
}

/**
 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
 *
 * @work: work_struct.
 */
static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, delayed_init_work.work);
	int r;

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);
}

static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);

	WARN_ON_ONCE(adev->gfx.gfx_off_state);
	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);

	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
		adev->gfx.gfx_off_state = true;
}

/**
 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	/*
	 * Per the PMFW team's suggestion, the driver needs to handle gfxoff
	 * and df cstate feature disablement for GPU reset (e.g. Mode1Reset)
	 * scenarios. Add the missing df cstate disablement here.
	 */
	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
		dev_warn(adev->dev, "Failed to disallow df cstate");

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		/* displays are handled separately */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
			continue;

		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}

		adev->ip_blocks[i].status.hw = false;
	}

	return 0;
}

/**
 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
	int i, r;

	if (adev->in_s0ix)
		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* displays are handled in phase1 */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
			continue;
		/* PSP lost connection when err_event_athub occurs */
		if (amdgpu_ras_intr_triggered() &&
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			adev->ip_blocks[i].status.hw = false;
			continue;
		}

		/* skip unnecessary suspend if we have not initialized them yet */
		if (adev->gmc.xgmi.pending_reset &&
		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
			adev->ip_blocks[i].status.hw = false;
			continue;
		}

		/* skip suspend of gfx/mes and psp for S0ix
		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
		 * like at runtime. PSP is also part of the always on hardware
		 * so no need to suspend it.
		 */
		if (adev->in_s0ix &&
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
			continue;

		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
		if (adev->in_s0ix &&
		    (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
		     IP_VERSION(5, 0, 0)) &&
		    (adev->ip_blocks[i].version->type ==
		     AMD_IP_BLOCK_TYPE_SDMA))
			continue;

		/* During cold boot, sw PSP provides the IMU and RLC FW binaries to TOS.
		 * These are in TMR, hence are expected to be reused by PSP-TOS to reload
		 * from this location, and RLC autoload is also triggered from here
		 * based on the PMFW -> PSP message during the re-init sequence.
		 * Therefore, the psp suspend & resume should be skipped to avoid destroying
		 * the TMR and reloading FWs again for IMU enabled APU ASICs.
		 */
		if (amdgpu_in_reset(adev) &&
		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
			continue;

		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.hw = false;
		/* handle putting the SMC in the appropriate state */
		if (!amdgpu_sriov_vf(adev)) {
			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
				if (r) {
					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
						  adev->mp1_state, r);
					return r;
				}
			}
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_suspend - run suspend for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_fini_data_exchange(adev);
		amdgpu_virt_request_full_gpu(adev, false);
	}

	amdgpu_ttm_set_buffer_funcs_status(adev, false);

	r = amdgpu_device_ip_suspend_phase1(adev);
	if (r)
		return r;
	r = amdgpu_device_ip_suspend_phase2(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return r;
}

static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_PSP,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < adev->num_ip_blocks; i++) {
		int j;
		struct amdgpu_ip_block *block;

		block = &adev->ip_blocks[i];
		block->status.hw = false;

		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {

			if (block->version->type != ip_order[j] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
			block->status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_MES,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE,
		AMD_IP_BLOCK_TYPE_VCN,
		AMD_IP_BLOCK_TYPE_JPEG
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid ||
			    block->status.hw)
				continue;

			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
				r = block->version->funcs->resume(adev);
			else
				r = block->version->funcs->hw_init(adev);

			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
			block->status.hw = true;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * First resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * COMMON, GMC, and IH. resume puts the hardware into a functional state
 * after a suspend and updates the software state as necessary. This
 * function is also used for restoring the GPU after a GPU reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {

			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Second resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
 * functional state after a suspend and updates the software state as
 * necessary. This function is also used for restoring the GPU after a GPU
 * reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}
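/*
 * Illustrative summary (not code from this file): resume is split so the
 * blocks everything else depends on come up first:
 *
 *	phase1: COMMON, GMC, IH (plus PSP when running SR-IOV)
 *	firmware loading (amdgpu_device_fw_loading())
 *	phase2: all remaining blocks
 *
 * amdgpu_device_ip_resume() below runs these steps back to back.
 */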
/**
 * amdgpu_device_ip_resume - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main resume function for hardware IPs. The hardware IPs
 * are split into two resume functions because they are
 * also used in recovering from a GPU reset and some additional
 * steps need to be taken between them. In this case (S3/S4) they are
 * run sequentially.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		return r;

	r = amdgpu_device_ip_resume_phase2(adev);

	if (adev->mman.buffer_funcs_ring->sched.ready)
		amdgpu_ttm_set_buffer_funcs_status(adev, true);

	return r;
}

/**
 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Query the VBIOS data tables to determine if the board supports SR-IOV.
 */
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		if (adev->is_atom_fw) {
			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		} else {
			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		}

		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
	}
}

/**
 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
 *
 * @asic_type: AMD asic type
 *
 * Check if there is DC (new modesetting infrastructure) support for an asic.
 * returns true if DC has support, false if not.
 */
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
	switch (asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
	case CHIP_TOPAZ:
		/* chips with no display hardware */
		return false;
#if defined(CONFIG_DRM_AMD_DC)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		/*
		 * We have systems in the wild with these ASICs that require
		 * LVDS and VGA support which is not supported with DC.
		 *
		 * Fall back to the non-DC driver here by default so as not to
		 * cause regressions.
		 */
#if defined(CONFIG_DRM_AMD_DC_SI)
		return amdgpu_dc > 0;
#else
		return false;
#endif
	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		/*
		 * We have systems in the wild with these ASICs that require
		 * VGA support which is not supported with DC.
		 *
		 * Fall back to the non-DC driver here by default so as not to
		 * cause regressions.
		 */
		return amdgpu_dc > 0;
	default:
		return amdgpu_dc != 0;
#else
	default:
		if (amdgpu_dc > 0)
			DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
		return false;
#endif
	}
}

/**
 * amdgpu_device_has_dc_support - check if dc is supported
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true for supported, false for not supported
 */
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display ||
	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
		return false;

	return amdgpu_device_asic_has_dc_support(adev->asic_type);
}

static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
{
	struct amdgpu_device *adev =
		container_of(__work, struct amdgpu_device, xgmi_reset_work);
	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);

	/* It's a bug to not have a hive within this function */
	if (WARN_ON(!hive))
		return;

	/*
	 * Use task barrier to synchronize all xgmi reset works across the
	 * hive. task_barrier_enter and task_barrier_exit will block
	 * until all the threads running the xgmi reset works reach
	 * those points. task_barrier_full will do both blocks.
	 */
	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {

		task_barrier_enter(&hive->tb);
		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));

		if (adev->asic_reset_res)
			goto fail;

		task_barrier_exit(&hive->tb);
		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));

		if (adev->asic_reset_res)
			goto fail;

		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
	} else {

		task_barrier_full(&hive->tb);
		adev->asic_reset_res = amdgpu_asic_reset(adev);
	}

fail:
	if (adev->asic_reset_res)
		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
			 adev->asic_reset_res, adev_to_drm(adev)->unique);
	amdgpu_put_xgmi_hive(hive);
}

static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
{
	char *input = amdgpu_lockup_timeout;
	char *timeout_setting = NULL;
	int index = 0;
	long timeout;
	int ret = 0;

	/*
	 * By default the timeout for non-compute jobs is 10000
	 * and 60000 for compute jobs.
	 * In SR-IOV or passthrough mode, the timeout for compute
	 * jobs is 60000 by default.
	 */
	adev->gfx_timeout = msecs_to_jiffies(10000);
	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
	if (amdgpu_sriov_vf(adev))
		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
	else
		adev->compute_timeout = msecs_to_jiffies(60000);

	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
		while ((timeout_setting = strsep(&input, ",")) &&
		       strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
			ret = kstrtol(timeout_setting, 0, &timeout);
			if (ret)
				return ret;

			if (timeout == 0) {
				index++;
				continue;
			} else if (timeout < 0) {
				timeout = MAX_SCHEDULE_TIMEOUT;
				dev_warn(adev->dev, "lockup timeout disabled");
				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
			} else {
				timeout = msecs_to_jiffies(timeout);
			}

			switch (index++) {
			case 0:
				adev->gfx_timeout = timeout;
				break;
			case 1:
				adev->compute_timeout = timeout;
				break;
			case 2:
				adev->sdma_timeout = timeout;
				break;
			case 3:
				adev->video_timeout = timeout;
				break;
			default:
				break;
			}
		}
		/*
		 * There is only one value specified and
		 * it should apply to all non-compute jobs.
		 */
		if (index == 1) {
			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
				adev->compute_timeout = adev->gfx_timeout;
		}
	}

	return ret;
}

/**
 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
 *
 * @adev: amdgpu_device pointer
 *
 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in
 * passthrough mode.
 */
static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
{
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(adev->dev);
	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
		adev->ram_is_direct_mapped = true;
}

static const struct attribute *amdgpu_dev_attributes[] = {
	&dev_attr_pcie_replay_count.attr,
	NULL
};

static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
{
	if (amdgpu_mcbp == 1)
		adev->gfx.mcbp = true;
	else if (amdgpu_mcbp == 0)
		adev->gfx.mcbp = false;
	else if ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 0, 0)) &&
		 (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0)) &&
		 adev->gfx.num_gfx_rings)
		adev->gfx.mcbp = true;

	if (amdgpu_sriov_vf(adev))
		adev->gfx.mcbp = true;

	if (adev->gfx.mcbp)
		DRM_INFO("MCBP is enabled\n");
}
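/*
 * Example (illustrative only): with the parsing above, a boot parameter
 * such as
 *
 *	amdgpu.lockup_timeout=10000,60000,10000,0
 *
 * sets the gfx, compute and sdma timeouts (in ms) and leaves the video
 * timeout at its default ("0" keeps the default), while a single value
 * like amdgpu.lockup_timeout=5000 applies to all non-compute queues.
 */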
3815 */ 3816 int amdgpu_device_init(struct amdgpu_device *adev, 3817 uint32_t flags) 3818 { 3819 struct drm_device *ddev = adev_to_drm(adev); 3820 struct pci_dev *pdev = adev->pdev; 3821 int r, i; 3822 bool px = false; 3823 u32 max_MBps; 3824 int tmp; 3825 3826 adev->shutdown = false; 3827 adev->flags = flags; 3828 3829 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) 3830 adev->asic_type = amdgpu_force_asic_type; 3831 else 3832 adev->asic_type = flags & AMD_ASIC_MASK; 3833 3834 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; 3835 if (amdgpu_emu_mode == 1) 3836 adev->usec_timeout *= 10; 3837 adev->gmc.gart_size = 512 * 1024 * 1024; 3838 adev->accel_working = false; 3839 adev->num_rings = 0; 3840 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub()); 3841 adev->mman.buffer_funcs = NULL; 3842 adev->mman.buffer_funcs_ring = NULL; 3843 adev->vm_manager.vm_pte_funcs = NULL; 3844 adev->vm_manager.vm_pte_num_scheds = 0; 3845 adev->gmc.gmc_funcs = NULL; 3846 adev->harvest_ip_mask = 0x0; 3847 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 3848 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 3849 3850 adev->smc_rreg = &amdgpu_invalid_rreg; 3851 adev->smc_wreg = &amdgpu_invalid_wreg; 3852 adev->pcie_rreg = &amdgpu_invalid_rreg; 3853 adev->pcie_wreg = &amdgpu_invalid_wreg; 3854 adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext; 3855 adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext; 3856 adev->pciep_rreg = &amdgpu_invalid_rreg; 3857 adev->pciep_wreg = &amdgpu_invalid_wreg; 3858 adev->pcie_rreg64 = &amdgpu_invalid_rreg64; 3859 adev->pcie_wreg64 = &amdgpu_invalid_wreg64; 3860 adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext; 3861 adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext; 3862 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; 3863 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; 3864 adev->didt_rreg = &amdgpu_invalid_rreg; 3865 adev->didt_wreg = &amdgpu_invalid_wreg; 3866 adev->gc_cac_rreg = &amdgpu_invalid_rreg; 3867 adev->gc_cac_wreg = &amdgpu_invalid_wreg; 3868 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; 3869 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; 3870 3871 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 3872 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 3873 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 3874 3875 /* mutex initialization are all done here so we 3876 * can recall function without having locking issues 3877 */ 3878 mutex_init(&adev->firmware.mutex); 3879 mutex_init(&adev->pm.mutex); 3880 mutex_init(&adev->gfx.gpu_clock_mutex); 3881 mutex_init(&adev->srbm_mutex); 3882 mutex_init(&adev->gfx.pipe_reserve_mutex); 3883 mutex_init(&adev->gfx.gfx_off_mutex); 3884 mutex_init(&adev->gfx.partition_mutex); 3885 mutex_init(&adev->grbm_idx_mutex); 3886 mutex_init(&adev->mn_lock); 3887 mutex_init(&adev->virt.vf_errors.lock); 3888 hash_init(adev->mn_hash); 3889 mutex_init(&adev->psp.mutex); 3890 mutex_init(&adev->notifier_lock); 3891 mutex_init(&adev->pm.stable_pstate_ctx_lock); 3892 mutex_init(&adev->benchmark_mutex); 3893 3894 amdgpu_device_init_apu_flags(adev); 3895 3896 r = amdgpu_device_check_arguments(adev); 3897 if (r) 3898 return r; 3899 3900 spin_lock_init(&adev->mmio_idx_lock); 3901 spin_lock_init(&adev->smc_idx_lock); 3902 spin_lock_init(&adev->pcie_idx_lock); 3903 spin_lock_init(&adev->uvd_ctx_idx_lock); 3904 spin_lock_init(&adev->didt_idx_lock); 3905 spin_lock_init(&adev->gc_cac_idx_lock); 3906 spin_lock_init(&adev->se_cac_idx_lock); 
3907 spin_lock_init(&adev->audio_endpt_idx_lock); 3908 spin_lock_init(&adev->mm_stats.lock); 3909 3910 INIT_LIST_HEAD(&adev->shadow_list); 3911 mutex_init(&adev->shadow_list_lock); 3912 3913 INIT_LIST_HEAD(&adev->reset_list); 3914 3915 INIT_LIST_HEAD(&adev->ras_list); 3916 3917 INIT_LIST_HEAD(&adev->pm.od_kobj_list); 3918 3919 INIT_DELAYED_WORK(&adev->delayed_init_work, 3920 amdgpu_device_delayed_init_work_handler); 3921 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, 3922 amdgpu_device_delay_enable_gfx_off); 3923 3924 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); 3925 3926 adev->gfx.gfx_off_req_count = 1; 3927 adev->gfx.gfx_off_residency = 0; 3928 adev->gfx.gfx_off_entrycount = 0; 3929 adev->pm.ac_power = power_supply_is_system_supplied() > 0; 3930 3931 atomic_set(&adev->throttling_logging_enabled, 1); 3932 /* 3933 * If throttling continues, logging will be performed every minute 3934 * to avoid log flooding. "-1" is subtracted since the thermal 3935 * throttling interrupt comes every second. Thus, the total logging 3936 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting 3937 * for throttling interrupt) = 60 seconds. 3938 */ 3939 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1); 3940 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE); 3941 3942 /* Registers mapping */ 3943 /* TODO: block userspace mapping of io register */ 3944 if (adev->asic_type >= CHIP_BONAIRE) { 3945 adev->rmmio_base = pci_resource_start(adev->pdev, 5); 3946 adev->rmmio_size = pci_resource_len(adev->pdev, 5); 3947 } else { 3948 adev->rmmio_base = pci_resource_start(adev->pdev, 2); 3949 adev->rmmio_size = pci_resource_len(adev->pdev, 2); 3950 } 3951 3952 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++) 3953 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); 3954 3955 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); 3956 if (!adev->rmmio) 3957 return -ENOMEM; 3958 3959 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); 3960 DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size); 3961 3962 /* 3963 * The reset domain needs to be present early, before the XGMI hive is 3964 * discovered (if any) and initialized, so the reset sem and in_gpu_reset 3965 * flag can be used early on during init and before calling RREG32.
3966 */ 3967 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev"); 3968 if (!adev->reset_domain) 3969 return -ENOMEM; 3970 3971 /* detect hw virtualization here */ 3972 amdgpu_detect_virtualization(adev); 3973 3974 amdgpu_device_get_pcie_info(adev); 3975 3976 r = amdgpu_device_get_job_timeout_settings(adev); 3977 if (r) { 3978 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); 3979 return r; 3980 } 3981 3982 /* early init functions */ 3983 r = amdgpu_device_ip_early_init(adev); 3984 if (r) 3985 return r; 3986 3987 amdgpu_device_set_mcbp(adev); 3988 3989 /* Get rid of things like offb */ 3990 r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver); 3991 if (r) 3992 return r; 3993 3994 /* Enable TMZ based on IP_VERSION */ 3995 amdgpu_gmc_tmz_set(adev); 3996 3997 amdgpu_gmc_noretry_set(adev); 3998 /* Need to get xgmi info early to decide the reset behavior */ 3999 if (adev->gmc.xgmi.supported) { 4000 r = adev->gfxhub.funcs->get_xgmi_info(adev); 4001 if (r) 4002 return r; 4003 } 4004 4005 /* enable PCIe atomic ops */ 4006 if (amdgpu_sriov_vf(adev)) { 4007 if (adev->virt.fw_reserve.p_pf2vf) 4008 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) 4009 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags == 4010 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64); 4011 /* APUs with gfx9 onwards don't rely on PCIe atomics; their internal 4012 * path natively supports atomics, so set have_atomics_support to true. 4013 */ 4014 } else if ((adev->flags & AMD_IS_APU) && 4015 (amdgpu_ip_version(adev, GC_HWIP, 0) > 4016 IP_VERSION(9, 0, 0))) { 4017 adev->have_atomics_support = true; 4018 } else { 4019 adev->have_atomics_support = 4020 !pci_enable_atomic_ops_to_root(adev->pdev, 4021 PCI_EXP_DEVCAP2_ATOMIC_COMP32 | 4022 PCI_EXP_DEVCAP2_ATOMIC_COMP64); 4023 } 4024 4025 if (!adev->have_atomics_support) 4026 dev_info(adev->dev, "PCIe atomic ops are not supported\n"); 4027 4028 /* doorbell bar mapping and doorbell index init */ 4029 amdgpu_doorbell_init(adev); 4030 4031 if (amdgpu_emu_mode == 1) { 4032 /* post the asic in emulation mode */ 4033 emu_soc_asic_init(adev); 4034 goto fence_driver_init; 4035 } 4036 4037 amdgpu_reset_init(adev); 4038 4039 /* detect if we are with an SRIOV vbios */ 4040 if (adev->bios) 4041 amdgpu_device_detect_sriov_bios(adev); 4042 4043 /* check if we need to reset the asic 4044 * E.g., driver was not cleanly unloaded previously, etc.
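 * When a hive reset is pending, only the blocks the SMU needs
 * (COMMON, GMC, IH, SMC) are left for hw_init below; all others are
 * marked as already initialized so they are skipped.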
4045 */ 4046 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { 4047 if (adev->gmc.xgmi.num_physical_nodes) { 4048 dev_info(adev->dev, "Pending hive reset.\n"); 4049 adev->gmc.xgmi.pending_reset = true; 4050 /* Only need to init necessary block for SMU to handle the reset */ 4051 for (i = 0; i < adev->num_ip_blocks; i++) { 4052 if (!adev->ip_blocks[i].status.valid) 4053 continue; 4054 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 4055 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 4056 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 4057 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) { 4058 DRM_DEBUG("IP %s disabled for hw_init.\n", 4059 adev->ip_blocks[i].version->funcs->name); 4060 adev->ip_blocks[i].status.hw = true; 4061 } 4062 } 4063 } else { 4064 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 4065 case IP_VERSION(13, 0, 0): 4066 case IP_VERSION(13, 0, 7): 4067 case IP_VERSION(13, 0, 10): 4068 r = psp_gpu_reset(adev); 4069 break; 4070 default: 4071 tmp = amdgpu_reset_method; 4072 /* It should do a default reset when loading or reloading the driver, 4073 * regardless of the module parameter reset_method. 4074 */ 4075 amdgpu_reset_method = AMD_RESET_METHOD_NONE; 4076 r = amdgpu_asic_reset(adev); 4077 amdgpu_reset_method = tmp; 4078 break; 4079 } 4080 4081 if (r) { 4082 dev_err(adev->dev, "asic reset on init failed\n"); 4083 goto failed; 4084 } 4085 } 4086 } 4087 4088 /* Post card if necessary */ 4089 if (amdgpu_device_need_post(adev)) { 4090 if (!adev->bios) { 4091 dev_err(adev->dev, "no vBIOS found\n"); 4092 r = -EINVAL; 4093 goto failed; 4094 } 4095 DRM_INFO("GPU posting now...\n"); 4096 r = amdgpu_device_asic_init(adev); 4097 if (r) { 4098 dev_err(adev->dev, "gpu post error!\n"); 4099 goto failed; 4100 } 4101 } 4102 4103 if (adev->bios) { 4104 if (adev->is_atom_fw) { 4105 /* Initialize clocks */ 4106 r = amdgpu_atomfirmware_get_clock_info(adev); 4107 if (r) { 4108 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); 4109 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 4110 goto failed; 4111 } 4112 } else { 4113 /* Initialize clocks */ 4114 r = amdgpu_atombios_get_clock_info(adev); 4115 if (r) { 4116 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); 4117 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 4118 goto failed; 4119 } 4120 /* init i2c buses */ 4121 if (!amdgpu_device_has_dc_support(adev)) 4122 amdgpu_atombios_i2c_init(adev); 4123 } 4124 } 4125 4126 fence_driver_init: 4127 /* Fence driver */ 4128 r = amdgpu_fence_driver_sw_init(adev); 4129 if (r) { 4130 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n"); 4131 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); 4132 goto failed; 4133 } 4134 4135 /* init the mode config */ 4136 drm_mode_config_init(adev_to_drm(adev)); 4137 4138 r = amdgpu_device_ip_init(adev); 4139 if (r) { 4140 dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); 4141 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); 4142 goto release_ras_con; 4143 } 4144 4145 amdgpu_fence_driver_hw_init(adev); 4146 4147 dev_info(adev->dev, 4148 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n", 4149 adev->gfx.config.max_shader_engines, 4150 adev->gfx.config.max_sh_per_se, 4151 adev->gfx.config.max_cu_per_sh, 4152 adev->gfx.cu_info.number); 4153 4154 adev->accel_working = true; 4155 4156 amdgpu_vm_check_compute_bug(adev); 4157 4158 /* Initialize the buffer migration 
limit. */ 4159 if (amdgpu_moverate >= 0) 4160 max_MBps = amdgpu_moverate; 4161 else 4162 max_MBps = 8; /* Allow 8 MB/s. */ 4163 /* Get a log2 for easy divisions. */ 4164 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); 4165 4166 /* 4167 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost. 4168 * Otherwise the mgpu fan boost feature will be skipped because the 4169 * gpu instance count would be too low. 4170 */ 4171 amdgpu_register_gpu_instance(adev); 4172 4173 /* enable clockgating, etc. after ib tests, etc. since some blocks require 4174 * explicit gating rather than handling it automatically. 4175 */ 4176 if (!adev->gmc.xgmi.pending_reset) { 4177 r = amdgpu_device_ip_late_init(adev); 4178 if (r) { 4179 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); 4180 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); 4181 goto release_ras_con; 4182 } 4183 /* must succeed. */ 4184 amdgpu_ras_resume(adev); 4185 queue_delayed_work(system_wq, &adev->delayed_init_work, 4186 msecs_to_jiffies(AMDGPU_RESUME_MS)); 4187 } 4188 4189 if (amdgpu_sriov_vf(adev)) { 4190 amdgpu_virt_release_full_gpu(adev, true); 4191 flush_delayed_work(&adev->delayed_init_work); 4192 } 4193 4194 /* 4195 * Register these sysfs interfaces after `late_init`, since some of 4196 * the operations performed in `late_init` might affect how the 4197 * sysfs interfaces are created. 4198 */ 4199 r = amdgpu_atombios_sysfs_init(adev); 4200 if (r) 4201 drm_err(&adev->ddev, 4202 "registering atombios sysfs failed (%d).\n", r); 4203 4204 r = amdgpu_pm_sysfs_init(adev); 4205 if (r) 4206 DRM_ERROR("registering pm sysfs failed (%d).\n", r); 4207 4208 r = amdgpu_ucode_sysfs_init(adev); 4209 if (r) { 4210 adev->ucode_sysfs_en = false; 4211 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); 4212 } else 4213 adev->ucode_sysfs_en = true; 4214 4215 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes); 4216 if (r) 4217 dev_err(adev->dev, "Could not create amdgpu device attr\n"); 4218 4219 r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group); 4220 if (r) 4221 dev_err(adev->dev, 4222 "Could not create amdgpu board attributes\n"); 4223 4224 amdgpu_fru_sysfs_init(adev); 4225 4226 if (IS_ENABLED(CONFIG_PERF_EVENTS)) { 4227 r = amdgpu_pmu_init(adev); 4228 if (r) 4229 dev_err(adev->dev, "amdgpu_pmu_init failed\n"); 4230 } 4231 /* Keep the stored PCI config space at hand for restore on a sudden PCI error */ 4232 if (amdgpu_device_cache_pci_state(adev->pdev)) 4233 pci_restore_state(pdev); 4234 4235 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ 4236 /* this will fail for cards that aren't VGA class devices, just 4237 * ignore it 4238 */ 4239 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) 4240 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); 4241 4242 px = amdgpu_device_supports_px(ddev); 4243 4244 if (px || (!dev_is_removable(&adev->pdev->dev) && 4245 apple_gmux_detect(NULL, NULL))) 4246 vga_switcheroo_register_client(adev->pdev, 4247 &amdgpu_switcheroo_ops, px); 4248 4249 if (px) 4250 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); 4251 4252 if (adev->gmc.xgmi.pending_reset) 4253 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work, 4254 msecs_to_jiffies(AMDGPU_RESUME_MS)); 4255 4256 amdgpu_device_check_iommu_direct_map(adev); 4257 4258 return 0; 4259 4260 release_ras_con: 4261 if (amdgpu_sriov_vf(adev)) 4262 amdgpu_virt_release_full_gpu(adev, true); 4263 4264 /* failed in exclusive mode due to timeout */ 4265 if (amdgpu_sriov_vf(adev) && 4266
!amdgpu_sriov_runtime(adev) && 4267 amdgpu_virt_mmio_blocked(adev) && 4268 !amdgpu_virt_wait_reset(adev)) { 4269 dev_err(adev->dev, "VF exclusive mode timeout\n"); 4270 /* Don't send request since VF is inactive. */ 4271 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; 4272 adev->virt.ops = NULL; 4273 r = -EAGAIN; 4274 } 4275 amdgpu_release_ras_context(adev); 4276 4277 failed: 4278 amdgpu_vf_error_trans_all(adev); 4279 4280 return r; 4281 } 4282 4283 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) 4284 { 4285 4286 /* Clear all CPU mappings pointing to this device */ 4287 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); 4288 4289 /* Unmap all mapped bars - Doorbell, registers and VRAM */ 4290 amdgpu_doorbell_fini(adev); 4291 4292 iounmap(adev->rmmio); 4293 adev->rmmio = NULL; 4294 if (adev->mman.aper_base_kaddr) 4295 iounmap(adev->mman.aper_base_kaddr); 4296 adev->mman.aper_base_kaddr = NULL; 4297 4298 /* Memory manager related */ 4299 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { 4300 arch_phys_wc_del(adev->gmc.vram_mtrr); 4301 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); 4302 } 4303 } 4304 4305 /** 4306 * amdgpu_device_fini_hw - tear down the driver 4307 * 4308 * @adev: amdgpu_device pointer 4309 * 4310 * Tear down the driver info (all asics). 4311 * Called at driver shutdown. 4312 */ 4313 void amdgpu_device_fini_hw(struct amdgpu_device *adev) 4314 { 4315 dev_info(adev->dev, "amdgpu: finishing device.\n"); 4316 flush_delayed_work(&adev->delayed_init_work); 4317 adev->shutdown = true; 4318 4319 /* make sure IB test finished before entering exclusive mode 4320 * to avoid preemption on IB test 4321 */ 4322 if (amdgpu_sriov_vf(adev)) { 4323 amdgpu_virt_request_full_gpu(adev, false); 4324 amdgpu_virt_fini_data_exchange(adev); 4325 } 4326 4327 /* disable all interrupts */ 4328 amdgpu_irq_disable_all(adev); 4329 if (adev->mode_info.mode_config_initialized) { 4330 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev))) 4331 drm_helper_force_disable_all(adev_to_drm(adev)); 4332 else 4333 drm_atomic_helper_shutdown(adev_to_drm(adev)); 4334 } 4335 amdgpu_fence_driver_hw_fini(adev); 4336 4337 if (adev->mman.initialized) 4338 drain_workqueue(adev->mman.bdev.wq); 4339 4340 if (adev->pm.sysfs_initialized) 4341 amdgpu_pm_sysfs_fini(adev); 4342 if (adev->ucode_sysfs_en) 4343 amdgpu_ucode_sysfs_fini(adev); 4344 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); 4345 amdgpu_fru_sysfs_fini(adev); 4346 4347 /* disable ras feature must before hw fini */ 4348 amdgpu_ras_pre_fini(adev); 4349 4350 amdgpu_ttm_set_buffer_funcs_status(adev, false); 4351 4352 amdgpu_device_ip_fini_early(adev); 4353 4354 amdgpu_irq_fini_hw(adev); 4355 4356 if (adev->mman.initialized) 4357 ttm_device_clear_dma_mappings(&adev->mman.bdev); 4358 4359 amdgpu_gart_dummy_page_fini(adev); 4360 4361 if (drm_dev_is_unplugged(adev_to_drm(adev))) 4362 amdgpu_device_unmap_mmio(adev); 4363 4364 } 4365 4366 void amdgpu_device_fini_sw(struct amdgpu_device *adev) 4367 { 4368 int idx; 4369 bool px; 4370 4371 amdgpu_fence_driver_sw_fini(adev); 4372 amdgpu_device_ip_fini(adev); 4373 amdgpu_ucode_release(&adev->firmware.gpu_info_fw); 4374 adev->accel_working = false; 4375 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true)); 4376 4377 amdgpu_reset_fini(adev); 4378 4379 /* free i2c buses */ 4380 if (!amdgpu_device_has_dc_support(adev)) 4381 amdgpu_i2c_fini(adev); 4382 4383 if (amdgpu_emu_mode != 1) 4384 amdgpu_atombios_fini(adev); 4385 4386 kfree(adev->bios); 4387 
adev->bios = NULL; 4388 4389 kfree(adev->fru_info); 4390 adev->fru_info = NULL; 4391 4392 px = amdgpu_device_supports_px(adev_to_drm(adev)); 4393 4394 if (px || (!dev_is_removable(&adev->pdev->dev) && 4395 apple_gmux_detect(NULL, NULL))) 4396 vga_switcheroo_unregister_client(adev->pdev); 4397 4398 if (px) 4399 vga_switcheroo_fini_domain_pm_ops(adev->dev); 4400 4401 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) 4402 vga_client_unregister(adev->pdev); 4403 4404 if (drm_dev_enter(adev_to_drm(adev), &idx)) { 4405 4406 iounmap(adev->rmmio); 4407 adev->rmmio = NULL; 4408 amdgpu_doorbell_fini(adev); 4409 drm_dev_exit(idx); 4410 } 4411 4412 if (IS_ENABLED(CONFIG_PERF_EVENTS)) 4413 amdgpu_pmu_fini(adev); 4414 if (adev->mman.discovery_bin) 4415 amdgpu_discovery_fini(adev); 4416 4417 amdgpu_reset_put_reset_domain(adev->reset_domain); 4418 adev->reset_domain = NULL; 4419 4420 kfree(adev->pci_state); 4421 4422 } 4423 4424 /** 4425 * amdgpu_device_evict_resources - evict device resources 4426 * @adev: amdgpu device object 4427 * 4428 * Evicts all ttm device resources(vram BOs, gart table) from the lru list 4429 * of the vram memory type. Mainly used for evicting device resources 4430 * at suspend time. 4431 * 4432 */ 4433 static int amdgpu_device_evict_resources(struct amdgpu_device *adev) 4434 { 4435 int ret; 4436 4437 /* No need to evict vram on APUs for suspend to ram or s2idle */ 4438 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU)) 4439 return 0; 4440 4441 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); 4442 if (ret) 4443 DRM_WARN("evicting device resources failed\n"); 4444 return ret; 4445 } 4446 4447 /* 4448 * Suspend & resume. 4449 */ 4450 /** 4451 * amdgpu_device_prepare - prepare for device suspend 4452 * 4453 * @dev: drm dev pointer 4454 * 4455 * Prepare to put the hw in the suspend state (all asics). 4456 * Returns 0 for success or an error on failure. 4457 * Called at driver suspend. 4458 */ 4459 int amdgpu_device_prepare(struct drm_device *dev) 4460 { 4461 struct amdgpu_device *adev = drm_to_adev(dev); 4462 int i, r; 4463 4464 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 4465 return 0; 4466 4467 /* Evict the majority of BOs before starting suspend sequence */ 4468 r = amdgpu_device_evict_resources(adev); 4469 if (r) 4470 return r; 4471 4472 for (i = 0; i < adev->num_ip_blocks; i++) { 4473 if (!adev->ip_blocks[i].status.valid) 4474 continue; 4475 if (!adev->ip_blocks[i].version->funcs->prepare_suspend) 4476 continue; 4477 r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev); 4478 if (r) 4479 return r; 4480 } 4481 4482 return 0; 4483 } 4484 4485 /** 4486 * amdgpu_device_suspend - initiate device suspend 4487 * 4488 * @dev: drm dev pointer 4489 * @fbcon : notify the fbdev of suspend 4490 * 4491 * Puts the hw in the suspend state (all asics). 4492 * Returns 0 for success or an error on failure. 4493 * Called at driver suspend. 
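 *
 * Suspend ordering below: evict VRAM buffers, suspend IP blocks in
 * phase 1, suspend KFD (outside of S0ix), evict remaining resources,
 * tear down the fence driver hw state, then suspend IP blocks in
 * phase 2.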
4494 */ 4495 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) 4496 { 4497 struct amdgpu_device *adev = drm_to_adev(dev); 4498 int r = 0; 4499 4500 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 4501 return 0; 4502 4503 adev->in_suspend = true; 4504 4505 if (amdgpu_sriov_vf(adev)) { 4506 amdgpu_virt_fini_data_exchange(adev); 4507 r = amdgpu_virt_request_full_gpu(adev, false); 4508 if (r) 4509 return r; 4510 } 4511 4512 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) 4513 DRM_WARN("smart shift update failed\n"); 4514 4515 if (fbcon) 4516 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); 4517 4518 cancel_delayed_work_sync(&adev->delayed_init_work); 4519 flush_delayed_work(&adev->gfx.gfx_off_delay_work); 4520 4521 amdgpu_ras_suspend(adev); 4522 4523 amdgpu_ttm_set_buffer_funcs_status(adev, false); 4524 4525 amdgpu_device_ip_suspend_phase1(adev); 4526 4527 if (!adev->in_s0ix) 4528 amdgpu_amdkfd_suspend(adev, adev->in_runpm); 4529 4530 r = amdgpu_device_evict_resources(adev); 4531 if (r) 4532 return r; 4533 4534 amdgpu_fence_driver_hw_fini(adev); 4535 4536 amdgpu_device_ip_suspend_phase2(adev); 4537 4538 if (amdgpu_sriov_vf(adev)) 4539 amdgpu_virt_release_full_gpu(adev, false); 4540 4541 r = amdgpu_dpm_notify_rlc_state(adev, false); 4542 if (r) 4543 return r; 4544 4545 return 0; 4546 } 4547 4548 /** 4549 * amdgpu_device_resume - initiate device resume 4550 * 4551 * @dev: drm dev pointer 4552 * @fbcon : notify the fbdev of resume 4553 * 4554 * Bring the hw back to operating state (all asics). 4555 * Returns 0 for success or an error on failure. 4556 * Called at driver resume. 4557 */ 4558 int amdgpu_device_resume(struct drm_device *dev, bool fbcon) 4559 { 4560 struct amdgpu_device *adev = drm_to_adev(dev); 4561 int r = 0; 4562 4563 if (amdgpu_sriov_vf(adev)) { 4564 r = amdgpu_virt_request_full_gpu(adev, true); 4565 if (r) 4566 return r; 4567 } 4568 4569 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 4570 return 0; 4571 4572 if (adev->in_s0ix) 4573 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry); 4574 4575 /* post card */ 4576 if (amdgpu_device_need_post(adev)) { 4577 r = amdgpu_device_asic_init(adev); 4578 if (r) 4579 dev_err(adev->dev, "amdgpu asic init failed\n"); 4580 } 4581 4582 r = amdgpu_device_ip_resume(adev); 4583 4584 if (r) { 4585 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); 4586 goto exit; 4587 } 4588 amdgpu_fence_driver_hw_init(adev); 4589 4590 if (!adev->in_s0ix) { 4591 r = amdgpu_amdkfd_resume(adev, adev->in_runpm); 4592 if (r) 4593 goto exit; 4594 } 4595 4596 r = amdgpu_device_ip_late_init(adev); 4597 if (r) 4598 goto exit; 4599 4600 queue_delayed_work(system_wq, &adev->delayed_init_work, 4601 msecs_to_jiffies(AMDGPU_RESUME_MS)); 4602 exit: 4603 if (amdgpu_sriov_vf(adev)) { 4604 amdgpu_virt_init_data_exchange(adev); 4605 amdgpu_virt_release_full_gpu(adev, true); 4606 } 4607 4608 if (r) 4609 return r; 4610 4611 /* Make sure IB tests flushed */ 4612 flush_delayed_work(&adev->delayed_init_work); 4613 4614 if (fbcon) 4615 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); 4616 4617 amdgpu_ras_resume(adev); 4618 4619 if (adev->mode_info.num_crtc) { 4620 /* 4621 * Most of the connector probing functions try to acquire runtime pm 4622 * refs to ensure that the GPU is powered on when connector polling is 4623 * performed. Since we're calling this from a runtime PM callback, 4624 * trying to acquire rpm refs will cause us to deadlock. 
4625 * 4626 * Since we're guaranteed to be holding the rpm lock, it's safe to 4627 * temporarily disable the rpm helpers so this doesn't deadlock us. 4628 */ 4629 #ifdef CONFIG_PM 4630 dev->dev->power.disable_depth++; 4631 #endif 4632 if (!adev->dc_enabled) 4633 drm_helper_hpd_irq_event(dev); 4634 else 4635 drm_kms_helper_hotplug_event(dev); 4636 #ifdef CONFIG_PM 4637 dev->dev->power.disable_depth--; 4638 #endif 4639 } 4640 adev->in_suspend = false; 4641 4642 if (adev->enable_mes) 4643 amdgpu_mes_self_test(adev); 4644 4645 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0)) 4646 DRM_WARN("smart shift update failed\n"); 4647 4648 return 0; 4649 } 4650 4651 /** 4652 * amdgpu_device_ip_check_soft_reset - did soft reset succeed 4653 * 4654 * @adev: amdgpu_device pointer 4655 * 4656 * The list of all the hardware IPs that make up the asic is walked and 4657 * the check_soft_reset callbacks are run. check_soft_reset determines 4658 * if the asic is still hung or not. 4659 * Returns true if any of the IPs are still in a hung state, false if not. 4660 */ 4661 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) 4662 { 4663 int i; 4664 bool asic_hang = false; 4665 4666 if (amdgpu_sriov_vf(adev)) 4667 return true; 4668 4669 if (amdgpu_asic_need_full_reset(adev)) 4670 return true; 4671 4672 for (i = 0; i < adev->num_ip_blocks; i++) { 4673 if (!adev->ip_blocks[i].status.valid) 4674 continue; 4675 if (adev->ip_blocks[i].version->funcs->check_soft_reset) 4676 adev->ip_blocks[i].status.hang = 4677 adev->ip_blocks[i].version->funcs->check_soft_reset(adev); 4678 if (adev->ip_blocks[i].status.hang) { 4679 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); 4680 asic_hang = true; 4681 } 4682 } 4683 return asic_hang; 4684 } 4685 4686 /** 4687 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset 4688 * 4689 * @adev: amdgpu_device pointer 4690 * 4691 * The list of all the hardware IPs that make up the asic is walked and the 4692 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset 4693 * handles any IP specific hardware or software state changes that are 4694 * necessary for a soft reset to succeed. 4695 * Returns 0 on success, negative error code on failure. 4696 */ 4697 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) 4698 { 4699 int i, r = 0; 4700 4701 for (i = 0; i < adev->num_ip_blocks; i++) { 4702 if (!adev->ip_blocks[i].status.valid) 4703 continue; 4704 if (adev->ip_blocks[i].status.hang && 4705 adev->ip_blocks[i].version->funcs->pre_soft_reset) { 4706 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); 4707 if (r) 4708 return r; 4709 } 4710 } 4711 4712 return 0; 4713 } 4714 4715 /** 4716 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed 4717 * 4718 * @adev: amdgpu_device pointer 4719 * 4720 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu 4721 * reset is necessary to recover. 4722 * Returns true if a full asic reset is required, false if not. 
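 * The block types checked below (GMC, SMC, ACP, DCE and PSP) are the
 * ones that cannot be soft reset; a hang in any of them forces a
 * full reset.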
4723 */ 4724 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) 4725 { 4726 int i; 4727 4728 if (amdgpu_asic_need_full_reset(adev)) 4729 return true; 4730 4731 for (i = 0; i < adev->num_ip_blocks; i++) { 4732 if (!adev->ip_blocks[i].status.valid) 4733 continue; 4734 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || 4735 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || 4736 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || 4737 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || 4738 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 4739 if (adev->ip_blocks[i].status.hang) { 4740 dev_info(adev->dev, "Some block need full reset!\n"); 4741 return true; 4742 } 4743 } 4744 } 4745 return false; 4746 } 4747 4748 /** 4749 * amdgpu_device_ip_soft_reset - do a soft reset 4750 * 4751 * @adev: amdgpu_device pointer 4752 * 4753 * The list of all the hardware IPs that make up the asic is walked and the 4754 * soft_reset callbacks are run if the block is hung. soft_reset handles any 4755 * IP specific hardware or software state changes that are necessary to soft 4756 * reset the IP. 4757 * Returns 0 on success, negative error code on failure. 4758 */ 4759 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) 4760 { 4761 int i, r = 0; 4762 4763 for (i = 0; i < adev->num_ip_blocks; i++) { 4764 if (!adev->ip_blocks[i].status.valid) 4765 continue; 4766 if (adev->ip_blocks[i].status.hang && 4767 adev->ip_blocks[i].version->funcs->soft_reset) { 4768 r = adev->ip_blocks[i].version->funcs->soft_reset(adev); 4769 if (r) 4770 return r; 4771 } 4772 } 4773 4774 return 0; 4775 } 4776 4777 /** 4778 * amdgpu_device_ip_post_soft_reset - clean up from soft reset 4779 * 4780 * @adev: amdgpu_device pointer 4781 * 4782 * The list of all the hardware IPs that make up the asic is walked and the 4783 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset 4784 * handles any IP specific hardware or software state changes that are 4785 * necessary after the IP has been soft reset. 4786 * Returns 0 on success, negative error code on failure. 4787 */ 4788 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) 4789 { 4790 int i, r = 0; 4791 4792 for (i = 0; i < adev->num_ip_blocks; i++) { 4793 if (!adev->ip_blocks[i].status.valid) 4794 continue; 4795 if (adev->ip_blocks[i].status.hang && 4796 adev->ip_blocks[i].version->funcs->post_soft_reset) 4797 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); 4798 if (r) 4799 return r; 4800 } 4801 4802 return 0; 4803 } 4804 4805 /** 4806 * amdgpu_device_recover_vram - Recover some VRAM contents 4807 * 4808 * @adev: amdgpu_device pointer 4809 * 4810 * Restores the contents of VRAM buffers from the shadows in GTT. Used to 4811 * restore things like GPUVM page tables after a GPU reset where 4812 * the contents of VRAM might be lost. 4813 * 4814 * Returns: 4815 * 0 on success, negative error code on failure. 
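 * Walks the shadow list under shadow_list_lock and restores each
 * shadowed BO from GTT, waiting on the restore fences with a timeout
 * (8s under SR-IOV runtime, 100ms otherwise).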
4816 */ 4817 static int amdgpu_device_recover_vram(struct amdgpu_device *adev) 4818 { 4819 struct dma_fence *fence = NULL, *next = NULL; 4820 struct amdgpu_bo *shadow; 4821 struct amdgpu_bo_vm *vmbo; 4822 long r = 1, tmo; 4823 4824 if (amdgpu_sriov_runtime(adev)) 4825 tmo = msecs_to_jiffies(8000); 4826 else 4827 tmo = msecs_to_jiffies(100); 4828 4829 dev_info(adev->dev, "recover vram bo from shadow start\n"); 4830 mutex_lock(&adev->shadow_list_lock); 4831 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { 4832 /* If vm is compute context or adev is APU, shadow will be NULL */ 4833 if (!vmbo->shadow) 4834 continue; 4835 shadow = vmbo->shadow; 4836 4837 /* No need to recover an evicted BO */ 4838 if (shadow->tbo.resource->mem_type != TTM_PL_TT || 4839 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || 4840 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM) 4841 continue; 4842 4843 r = amdgpu_bo_restore_shadow(shadow, &next); 4844 if (r) 4845 break; 4846 4847 if (fence) { 4848 tmo = dma_fence_wait_timeout(fence, false, tmo); 4849 dma_fence_put(fence); 4850 fence = next; 4851 if (tmo == 0) { 4852 r = -ETIMEDOUT; 4853 break; 4854 } else if (tmo < 0) { 4855 r = tmo; 4856 break; 4857 } 4858 } else { 4859 fence = next; 4860 } 4861 } 4862 mutex_unlock(&adev->shadow_list_lock); 4863 4864 if (fence) 4865 tmo = dma_fence_wait_timeout(fence, false, tmo); 4866 dma_fence_put(fence); 4867 4868 if (r < 0 || tmo <= 0) { 4869 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); 4870 return -EIO; 4871 } 4872 4873 dev_info(adev->dev, "recover vram bo from shadow done\n"); 4874 return 0; 4875 } 4876 4877 4878 /** 4879 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf 4880 * 4881 * @adev: amdgpu_device pointer 4882 * @from_hypervisor: request from hypervisor 4883 * 4884 * do VF FLR and reinitialize Asic 4885 * return 0 means succeeded otherwise failed 4886 */ 4887 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, 4888 bool from_hypervisor) 4889 { 4890 int r; 4891 struct amdgpu_hive_info *hive = NULL; 4892 int retry_limit = 0; 4893 4894 retry: 4895 amdgpu_amdkfd_pre_reset(adev); 4896 4897 if (from_hypervisor) 4898 r = amdgpu_virt_request_full_gpu(adev, true); 4899 else 4900 r = amdgpu_virt_reset_gpu(adev); 4901 if (r) 4902 return r; 4903 amdgpu_irq_gpu_reset_resume_helper(adev); 4904 4905 /* some sw clean up VF needs to do before recover */ 4906 amdgpu_virt_post_reset(adev); 4907 4908 /* Resume IP prior to SMC */ 4909 r = amdgpu_device_ip_reinit_early_sriov(adev); 4910 if (r) 4911 goto error; 4912 4913 amdgpu_virt_init_data_exchange(adev); 4914 4915 r = amdgpu_device_fw_loading(adev); 4916 if (r) 4917 return r; 4918 4919 /* now we are okay to resume SMC/CP/SDMA */ 4920 r = amdgpu_device_ip_reinit_late_sriov(adev); 4921 if (r) 4922 goto error; 4923 4924 hive = amdgpu_get_xgmi_hive(adev); 4925 /* Update PSP FW topology after reset */ 4926 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) 4927 r = amdgpu_xgmi_update_topology(hive, adev); 4928 4929 if (hive) 4930 amdgpu_put_xgmi_hive(hive); 4931 4932 if (!r) { 4933 r = amdgpu_ib_ring_tests(adev); 4934 4935 amdgpu_amdkfd_post_reset(adev); 4936 } 4937 4938 error: 4939 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 4940 amdgpu_inc_vram_lost(adev); 4941 r = amdgpu_device_recover_vram(adev); 4942 } 4943 amdgpu_virt_release_full_gpu(adev, true); 4944 4945 if (AMDGPU_RETRY_SRIOV_RESET(r)) { 4946 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) { 4947 retry_limit++; 4948 goto 
retry; 4949 } else 4950 DRM_ERROR("GPU reset retry is beyond the retry limit\n"); 4951 } 4952 4953 return r; 4954 } 4955 4956 /** 4957 * amdgpu_device_has_job_running - check if there is any job in mirror list 4958 * 4959 * @adev: amdgpu_device pointer 4960 * 4961 * check if there is any job in mirror list 4962 */ 4963 bool amdgpu_device_has_job_running(struct amdgpu_device *adev) 4964 { 4965 int i; 4966 struct drm_sched_job *job; 4967 4968 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 4969 struct amdgpu_ring *ring = adev->rings[i]; 4970 4971 if (!ring || !ring->sched.thread) 4972 continue; 4973 4974 spin_lock(&ring->sched.job_list_lock); 4975 job = list_first_entry_or_null(&ring->sched.pending_list, 4976 struct drm_sched_job, list); 4977 spin_unlock(&ring->sched.job_list_lock); 4978 if (job) 4979 return true; 4980 } 4981 return false; 4982 } 4983 4984 /** 4985 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery 4986 * 4987 * @adev: amdgpu_device pointer 4988 * 4989 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover 4990 * a hung GPU. 4991 */ 4992 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) 4993 { 4994 4995 if (amdgpu_gpu_recovery == 0) 4996 goto disabled; 4997 4998 /* Skip soft reset check in fatal error mode */ 4999 if (!amdgpu_ras_is_poison_mode_supported(adev)) 5000 return true; 5001 5002 if (amdgpu_sriov_vf(adev)) 5003 return true; 5004 5005 if (amdgpu_gpu_recovery == -1) { 5006 switch (adev->asic_type) { 5007 #ifdef CONFIG_DRM_AMDGPU_SI 5008 case CHIP_VERDE: 5009 case CHIP_TAHITI: 5010 case CHIP_PITCAIRN: 5011 case CHIP_OLAND: 5012 case CHIP_HAINAN: 5013 #endif 5014 #ifdef CONFIG_DRM_AMDGPU_CIK 5015 case CHIP_KAVERI: 5016 case CHIP_KABINI: 5017 case CHIP_MULLINS: 5018 #endif 5019 case CHIP_CARRIZO: 5020 case CHIP_STONEY: 5021 case CHIP_CYAN_SKILLFISH: 5022 goto disabled; 5023 default: 5024 break; 5025 } 5026 } 5027 5028 return true; 5029 5030 disabled: 5031 dev_info(adev->dev, "GPU recovery disabled.\n"); 5032 return false; 5033 } 5034 5035 int amdgpu_device_mode1_reset(struct amdgpu_device *adev) 5036 { 5037 u32 i; 5038 int ret = 0; 5039 5040 amdgpu_atombios_scratch_regs_engine_hung(adev, true); 5041 5042 dev_info(adev->dev, "GPU mode1 reset\n"); 5043 5044 /* disable BM */ 5045 pci_clear_master(adev->pdev); 5046 5047 amdgpu_device_cache_pci_state(adev->pdev); 5048 5049 if (amdgpu_dpm_is_mode1_reset_supported(adev)) { 5050 dev_info(adev->dev, "GPU smu mode1 reset\n"); 5051 ret = amdgpu_dpm_mode1_reset(adev); 5052 } else { 5053 dev_info(adev->dev, "GPU psp mode1 reset\n"); 5054 ret = psp_gpu_reset(adev); 5055 } 5056 5057 if (ret) 5058 goto mode1_reset_failed; 5059 5060 amdgpu_device_load_pci_state(adev->pdev); 5061 ret = amdgpu_psp_wait_for_bootloader(adev); 5062 if (ret) 5063 goto mode1_reset_failed; 5064 5065 /* wait for asic to come out of reset */ 5066 for (i = 0; i < adev->usec_timeout; i++) { 5067 u32 memsize = adev->nbio.funcs->get_memsize(adev); 5068 5069 if (memsize != 0xffffffff) 5070 break; 5071 udelay(1); 5072 } 5073 5074 if (i >= adev->usec_timeout) { 5075 ret = -ETIMEDOUT; 5076 goto mode1_reset_failed; 5077 } 5078 5079 amdgpu_atombios_scratch_regs_engine_hung(adev, false); 5080 5081 return 0; 5082 5083 mode1_reset_failed: 5084 dev_err(adev->dev, "GPU mode1 reset failed\n"); 5085 return ret; 5086 } 5087 5088 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, 5089 struct amdgpu_reset_context *reset_context) 5090 { 5091 int i, r = 0; 5092 struct amdgpu_job *job = NULL; 5093 bool need_full_reset = 
5094 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5095 5096 if (reset_context->reset_req_dev == adev) 5097 job = reset_context->job; 5098 5099 if (amdgpu_sriov_vf(adev)) { 5100 /* stop the data exchange thread */ 5101 amdgpu_virt_fini_data_exchange(adev); 5102 } 5103 5104 amdgpu_fence_driver_isr_toggle(adev, true); 5105 5106 /* block all schedulers and reset given job's ring */ 5107 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5108 struct amdgpu_ring *ring = adev->rings[i]; 5109 5110 if (!ring || !ring->sched.thread) 5111 continue; 5112 5113 /* Clear job fence from fence drv to avoid force_completion 5114 * leave NULL and vm flush fence in fence drv 5115 */ 5116 amdgpu_fence_driver_clear_job_fences(ring); 5117 5118 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ 5119 amdgpu_fence_driver_force_completion(ring); 5120 } 5121 5122 amdgpu_fence_driver_isr_toggle(adev, false); 5123 5124 if (job && job->vm) 5125 drm_sched_increase_karma(&job->base); 5126 5127 r = amdgpu_reset_prepare_hwcontext(adev, reset_context); 5128 /* If reset handler not implemented, continue; otherwise return */ 5129 if (r == -EOPNOTSUPP) 5130 r = 0; 5131 else 5132 return r; 5133 5134 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */ 5135 if (!amdgpu_sriov_vf(adev)) { 5136 5137 if (!need_full_reset) 5138 need_full_reset = amdgpu_device_ip_need_full_reset(adev); 5139 5140 if (!need_full_reset && amdgpu_gpu_recovery && 5141 amdgpu_device_ip_check_soft_reset(adev)) { 5142 amdgpu_device_ip_pre_soft_reset(adev); 5143 r = amdgpu_device_ip_soft_reset(adev); 5144 amdgpu_device_ip_post_soft_reset(adev); 5145 if (r || amdgpu_device_ip_check_soft_reset(adev)) { 5146 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n"); 5147 need_full_reset = true; 5148 } 5149 } 5150 5151 if (need_full_reset) 5152 r = amdgpu_device_ip_suspend(adev); 5153 if (need_full_reset) 5154 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5155 else 5156 clear_bit(AMDGPU_NEED_FULL_RESET, 5157 &reset_context->flags); 5158 } 5159 5160 return r; 5161 } 5162 5163 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev) 5164 { 5165 int i; 5166 5167 lockdep_assert_held(&adev->reset_domain->sem); 5168 5169 for (i = 0; i < adev->reset_info.num_regs; i++) { 5170 adev->reset_info.reset_dump_reg_value[i] = 5171 RREG32(adev->reset_info.reset_dump_reg_list[i]); 5172 5173 trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i], 5174 adev->reset_info.reset_dump_reg_value[i]); 5175 } 5176 5177 return 0; 5178 } 5179 5180 int amdgpu_do_asic_reset(struct list_head *device_list_handle, 5181 struct amdgpu_reset_context *reset_context) 5182 { 5183 struct amdgpu_device *tmp_adev = NULL; 5184 bool need_full_reset, skip_hw_reset, vram_lost = false; 5185 int r = 0; 5186 bool gpu_reset_for_dev_remove = 0; 5187 5188 /* Try reset handler method first */ 5189 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, 5190 reset_list); 5191 amdgpu_reset_reg_dumps(tmp_adev); 5192 5193 reset_context->reset_device_list = device_list_handle; 5194 r = amdgpu_reset_perform_reset(tmp_adev, reset_context); 5195 /* If reset handler not implemented, continue; otherwise return */ 5196 if (r == -EOPNOTSUPP) 5197 r = 0; 5198 else 5199 return r; 5200 5201 /* Reset handler not implemented, use the default method */ 5202 need_full_reset = 5203 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5204 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); 5205 5206 
gpu_reset_for_dev_remove = 5207 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) && 5208 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5209 5210 /* 5211 * ASIC reset has to be done on all XGMI hive nodes ASAP 5212 * to allow proper links negotiation in FW (within 1 sec) 5213 */ 5214 if (!skip_hw_reset && need_full_reset) { 5215 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5216 /* For XGMI run all resets in parallel to speed up the process */ 5217 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { 5218 tmp_adev->gmc.xgmi.pending_reset = false; 5219 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) 5220 r = -EALREADY; 5221 } else 5222 r = amdgpu_asic_reset(tmp_adev); 5223 5224 if (r) { 5225 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s", 5226 r, adev_to_drm(tmp_adev)->unique); 5227 goto out; 5228 } 5229 } 5230 5231 /* For XGMI wait for all resets to complete before proceed */ 5232 if (!r) { 5233 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5234 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { 5235 flush_work(&tmp_adev->xgmi_reset_work); 5236 r = tmp_adev->asic_reset_res; 5237 if (r) 5238 break; 5239 } 5240 } 5241 } 5242 } 5243 5244 if (!r && amdgpu_ras_intr_triggered()) { 5245 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5246 amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB); 5247 } 5248 5249 amdgpu_ras_intr_cleared(); 5250 } 5251 5252 /* Since the mode1 reset affects base ip blocks, the 5253 * phase1 ip blocks need to be resumed. Otherwise there 5254 * will be a BIOS signature error and the psp bootloader 5255 * can't load kdb on the next amdgpu install. 5256 */ 5257 if (gpu_reset_for_dev_remove) { 5258 list_for_each_entry(tmp_adev, device_list_handle, reset_list) 5259 amdgpu_device_ip_resume_phase1(tmp_adev); 5260 5261 goto end; 5262 } 5263 5264 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5265 if (need_full_reset) { 5266 /* post card */ 5267 r = amdgpu_device_asic_init(tmp_adev); 5268 if (r) { 5269 dev_warn(tmp_adev->dev, "asic atom init failed!"); 5270 } else { 5271 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); 5272 5273 r = amdgpu_device_ip_resume_phase1(tmp_adev); 5274 if (r) 5275 goto out; 5276 5277 vram_lost = amdgpu_device_check_vram_lost(tmp_adev); 5278 5279 amdgpu_coredump(tmp_adev, vram_lost, reset_context); 5280 5281 if (vram_lost) { 5282 DRM_INFO("VRAM is lost due to GPU reset!\n"); 5283 amdgpu_inc_vram_lost(tmp_adev); 5284 } 5285 5286 r = amdgpu_device_fw_loading(tmp_adev); 5287 if (r) 5288 return r; 5289 5290 r = amdgpu_xcp_restore_partition_mode( 5291 tmp_adev->xcp_mgr); 5292 if (r) 5293 goto out; 5294 5295 r = amdgpu_device_ip_resume_phase2(tmp_adev); 5296 if (r) 5297 goto out; 5298 5299 if (tmp_adev->mman.buffer_funcs_ring->sched.ready) 5300 amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true); 5301 5302 if (vram_lost) 5303 amdgpu_device_fill_reset_magic(tmp_adev); 5304 5305 /* 5306 * Add this ASIC as tracked as reset was already 5307 * complete successfully. 
5308 */ 5309 amdgpu_register_gpu_instance(tmp_adev); 5310 5311 if (!reset_context->hive && 5312 tmp_adev->gmc.xgmi.num_physical_nodes > 1) 5313 amdgpu_xgmi_add_device(tmp_adev); 5314 5315 r = amdgpu_device_ip_late_init(tmp_adev); 5316 if (r) 5317 goto out; 5318 5319 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false); 5320 5321 /* 5322 * The GPU enters a bad state once the faulty pages 5323 * detected by ECC reach the threshold, and RAS 5324 * recovery is scheduled next. So add a check here 5325 * to break recovery if the bad page threshold has 5326 * indeed been exceeded, and remind the user to 5327 * retire this GPU or set a bigger 5328 * bad_page_threshold value to fix this the next 5329 * time the driver is probed. 5330 */ 5331 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) { 5332 /* must succeed. */ 5333 amdgpu_ras_resume(tmp_adev); 5334 } else { 5335 r = -EINVAL; 5336 goto out; 5337 } 5338 5339 /* Update PSP FW topology after reset */ 5340 if (reset_context->hive && 5341 tmp_adev->gmc.xgmi.num_physical_nodes > 1) 5342 r = amdgpu_xgmi_update_topology( 5343 reset_context->hive, tmp_adev); 5344 } 5345 } 5346 5347 out: 5348 if (!r) { 5349 amdgpu_irq_gpu_reset_resume_helper(tmp_adev); 5350 r = amdgpu_ib_ring_tests(tmp_adev); 5351 if (r) { 5352 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); 5353 need_full_reset = true; 5354 r = -EAGAIN; 5355 goto end; 5356 } 5357 } 5358 5359 if (!r) 5360 r = amdgpu_device_recover_vram(tmp_adev); 5361 else 5362 tmp_adev->asic_reset_res = r; 5363 5364 end: 5365 if (need_full_reset) 5366 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5367 else 5368 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5369 return r; 5370 } 5371 5372 5373 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev) 5374 { 5375 5376 switch (amdgpu_asic_reset_method(adev)) { 5377 case AMD_RESET_METHOD_MODE1: 5378 adev->mp1_state = PP_MP1_STATE_SHUTDOWN; 5379 break; 5380 case AMD_RESET_METHOD_MODE2: 5381 adev->mp1_state = PP_MP1_STATE_RESET; 5382 break; 5383 default: 5384 adev->mp1_state = PP_MP1_STATE_NONE; 5385 break; 5386 } 5387 } 5388 5389 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev) 5390 { 5391 amdgpu_vf_error_trans_all(adev); 5392 adev->mp1_state = PP_MP1_STATE_NONE; 5393 } 5394 5395 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev) 5396 { 5397 struct pci_dev *p = NULL; 5398 5399 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), 5400 adev->pdev->bus->number, 1); 5401 if (p) { 5402 pm_runtime_enable(&(p->dev)); 5403 pm_runtime_resume(&(p->dev)); 5404 } 5405 5406 pci_dev_put(p); 5407 } 5408 5409 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) 5410 { 5411 enum amd_reset_method reset_method; 5412 struct pci_dev *p = NULL; 5413 u64 expires; 5414 5415 /* 5416 * For now, only BACO and mode1 reset are confirmed 5417 * to suffer the audio issue when not properly suspended. 5418 */ 5419 reset_method = amdgpu_asic_reset_method(adev); 5420 if ((reset_method != AMD_RESET_METHOD_BACO) && 5421 (reset_method != AMD_RESET_METHOD_MODE1)) 5422 return -EINVAL; 5423 5424 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), 5425 adev->pdev->bus->number, 1); 5426 if (!p) 5427 return -ENODEV; 5428 5429 expires = pm_runtime_autosuspend_expiration(&(p->dev)); 5430 if (!expires) 5431 /* 5432 * If we cannot get the audio device autosuspend delay, 5433 * a fixed 4S interval will be used. Considering 3S is 5434 * the audio controller's default autosuspend delay setting.
5435 * 4S used here is guaranteed to cover that. 5436 */ 5437 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL; 5438 5439 while (!pm_runtime_status_suspended(&(p->dev))) { 5440 if (!pm_runtime_suspend(&(p->dev))) 5441 break; 5442 5443 if (expires < ktime_get_mono_fast_ns()) { 5444 dev_warn(adev->dev, "failed to suspend display audio\n"); 5445 pci_dev_put(p); 5446 /* TODO: abort the succeeding gpu reset? */ 5447 return -ETIMEDOUT; 5448 } 5449 } 5450 5451 pm_runtime_disable(&(p->dev)); 5452 5453 pci_dev_put(p); 5454 return 0; 5455 } 5456 5457 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev) 5458 { 5459 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 5460 5461 #if defined(CONFIG_DEBUG_FS) 5462 if (!amdgpu_sriov_vf(adev)) 5463 cancel_work(&adev->reset_work); 5464 #endif 5465 5466 if (adev->kfd.dev) 5467 cancel_work(&adev->kfd.reset_work); 5468 5469 if (amdgpu_sriov_vf(adev)) 5470 cancel_work(&adev->virt.flr_work); 5471 5472 if (con && adev->ras_enabled) 5473 cancel_work(&con->recovery_work); 5474 5475 } 5476 5477 /** 5478 * amdgpu_device_gpu_recover - reset the asic and recover scheduler 5479 * 5480 * @adev: amdgpu_device pointer 5481 * @job: which job trigger hang 5482 * @reset_context: amdgpu reset context pointer 5483 * 5484 * Attempt to reset the GPU if it has hung (all asics). 5485 * Attempt to do soft-reset or full-reset and reinitialize Asic 5486 * Returns 0 for success or an error on failure. 5487 */ 5488 5489 int amdgpu_device_gpu_recover(struct amdgpu_device *adev, 5490 struct amdgpu_job *job, 5491 struct amdgpu_reset_context *reset_context) 5492 { 5493 struct list_head device_list, *device_list_handle = NULL; 5494 bool job_signaled = false; 5495 struct amdgpu_hive_info *hive = NULL; 5496 struct amdgpu_device *tmp_adev = NULL; 5497 int i, r = 0; 5498 bool need_emergency_restart = false; 5499 bool audio_suspended = false; 5500 bool gpu_reset_for_dev_remove = false; 5501 5502 gpu_reset_for_dev_remove = 5503 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) && 5504 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5505 5506 /* 5507 * Special case: RAS triggered and full reset isn't supported 5508 */ 5509 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev); 5510 5511 /* 5512 * Flush RAM to disk so that after reboot 5513 * the user can read log and see why the system rebooted. 5514 */ 5515 if (need_emergency_restart && amdgpu_ras_get_context(adev) && 5516 amdgpu_ras_get_context(adev)->reboot) { 5517 DRM_WARN("Emergency reboot."); 5518 5519 ksys_sync_helper(); 5520 emergency_restart(); 5521 } 5522 5523 dev_info(adev->dev, "GPU %s begin!\n", 5524 need_emergency_restart ? "jobs stop":"reset"); 5525 5526 if (!amdgpu_sriov_vf(adev)) 5527 hive = amdgpu_get_xgmi_hive(adev); 5528 if (hive) 5529 mutex_lock(&hive->hive_lock); 5530 5531 reset_context->job = job; 5532 reset_context->hive = hive; 5533 /* 5534 * Build list of devices to reset. 5535 * In case we are in XGMI hive mode, resort the device list 5536 * to put adev in the 1st position. 
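 * For SR-IOV or a single (non-XGMI) device, the list simply
 * contains adev itself.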
5537 */ 5538 INIT_LIST_HEAD(&device_list); 5539 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) { 5540 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { 5541 list_add_tail(&tmp_adev->reset_list, &device_list); 5542 if (gpu_reset_for_dev_remove && adev->shutdown) 5543 tmp_adev->shutdown = true; 5544 } 5545 if (!list_is_first(&adev->reset_list, &device_list)) 5546 list_rotate_to_front(&adev->reset_list, &device_list); 5547 device_list_handle = &device_list; 5548 } else { 5549 list_add_tail(&adev->reset_list, &device_list); 5550 device_list_handle = &device_list; 5551 } 5552 5553 /* We need to lock reset domain only once both for XGMI and single device */ 5554 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, 5555 reset_list); 5556 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain); 5557 5558 /* block all schedulers and reset given job's ring */ 5559 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5560 5561 amdgpu_device_set_mp1_state(tmp_adev); 5562 5563 /* 5564 * Try to put the audio codec into suspend state 5565 * before the gpu reset starts. 5566 * 5567 * The power domain of the graphics device is 5568 * shared with the AZ power domain. Without this, 5569 * we may change the audio hardware from behind 5570 * the audio driver's back and trigger 5571 * some audio codec errors. 5572 */ 5573 if (!amdgpu_device_suspend_display_audio(tmp_adev)) 5574 audio_suspended = true; 5575 5576 amdgpu_ras_set_error_query_ready(tmp_adev, false); 5577 5578 cancel_delayed_work_sync(&tmp_adev->delayed_init_work); 5579 5580 if (!amdgpu_sriov_vf(tmp_adev)) 5581 amdgpu_amdkfd_pre_reset(tmp_adev); 5582 5583 /* 5584 * Mark these ASICs to be reset as untracked first, 5585 * and add them back after the reset completes. 5586 */ 5587 amdgpu_unregister_gpu_instance(tmp_adev); 5588 5589 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true); 5590 5591 /* disable ras on ALL IPs */ 5592 if (!need_emergency_restart && 5593 amdgpu_device_ip_need_full_reset(tmp_adev)) 5594 amdgpu_ras_suspend(tmp_adev); 5595 5596 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5597 struct amdgpu_ring *ring = tmp_adev->rings[i]; 5598 5599 if (!ring || !ring->sched.thread) 5600 continue; 5601 5602 drm_sched_stop(&ring->sched, job ? &job->base : NULL); 5603 5604 if (need_emergency_restart) 5605 amdgpu_job_stop_all_jobs_on_sched(&ring->sched); 5606 } 5607 atomic_inc(&tmp_adev->gpu_reset_counter); 5608 } 5609 5610 if (need_emergency_restart) 5611 goto skip_sched_resume; 5612 5613 /* 5614 * Must check guilty signal here since after this point all old 5615 * HW fences are force signaled. 5616 * 5617 * job->base holds a reference to parent fence 5618 */ 5619 if (job && dma_fence_is_signaled(&job->hw_fence)) { 5620 job_signaled = true; 5621 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); 5622 goto skip_hw_reset; 5623 } 5624 5625 retry: /* Rest of adevs pre asic reset from XGMI hive. */ 5626 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5627 if (gpu_reset_for_dev_remove) { 5628 /* Workaround for ASICs that need to disable SMC first */ 5629 amdgpu_device_smu_fini_early(tmp_adev); 5630 } 5631 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context); 5632 /* TODO: should we stop? */ 5633 if (r) { 5634 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", 5635 r, adev_to_drm(tmp_adev)->unique); 5636 tmp_adev->asic_reset_res = r; 5637 } 5638 5639 /* 5640 * Drop all pending non-scheduler resets.
Scheduler resets 5641 * were already dropped during drm_sched_stop. 5642 */ 5643 amdgpu_device_stop_pending_resets(tmp_adev); 5644 } 5645 5646 /* Actual ASIC resets if needed. */ 5647 /* Host driver will handle XGMI hive reset for SRIOV */ 5648 if (amdgpu_sriov_vf(adev)) { 5649 r = amdgpu_device_reset_sriov(adev, job ? false : true); 5650 if (r) 5651 adev->asic_reset_res = r; 5652 5653 /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so we need to resume ras during reset */ 5654 if (amdgpu_ip_version(adev, GC_HWIP, 0) == 5655 IP_VERSION(9, 4, 2) || 5656 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) 5657 amdgpu_ras_resume(adev); 5658 } else { 5659 r = amdgpu_do_asic_reset(device_list_handle, reset_context); 5660 if (r && r == -EAGAIN) 5661 goto retry; 5662 5663 if (!r && gpu_reset_for_dev_remove) 5664 goto recover_end; 5665 } 5666 5667 skip_hw_reset: 5668 5669 /* Post ASIC reset for all devs. */ 5670 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5671 5672 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5673 struct amdgpu_ring *ring = tmp_adev->rings[i]; 5674 5675 if (!ring || !ring->sched.thread) 5676 continue; 5677 5678 drm_sched_start(&ring->sched, true); 5679 } 5680 5681 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) 5682 drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); 5683 5684 if (tmp_adev->asic_reset_res) 5685 r = tmp_adev->asic_reset_res; 5686 5687 tmp_adev->asic_reset_res = 0; 5688 5689 if (r) { 5690 /* bad news, how do we tell it to userspace? */ 5691 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter)); 5692 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); 5693 } else { 5694 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); 5695 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0)) 5696 DRM_WARN("smart shift update failed\n"); 5697 } 5698 } 5699 5700 skip_sched_resume: 5701 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5702 /* unlock kfd: SRIOV would do it separately */ 5703 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev)) 5704 amdgpu_amdkfd_post_reset(tmp_adev); 5705 5706 /* kfd_post_reset will do nothing if kfd device is not initialized, 5707 * need to bring up kfd here if it wasn't initialized before 5708 */ 5709 if (!adev->kfd.init_complete) 5710 amdgpu_amdkfd_device_init(adev); 5711 5712 if (audio_suspended) 5713 amdgpu_device_resume_display_audio(tmp_adev); 5714 5715 amdgpu_device_unset_mp1_state(tmp_adev); 5716 5717 amdgpu_ras_set_error_query_ready(tmp_adev, true); 5718 } 5719 5720 recover_end: 5721 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, 5722 reset_list); 5723 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); 5724 5725 if (hive) { 5726 mutex_unlock(&hive->hive_lock); 5727 amdgpu_put_xgmi_hive(hive); 5728 } 5729 5730 if (r) 5731 dev_info(adev->dev, "GPU reset end with ret = %d\n", r); 5732 5733 atomic_set(&adev->reset_domain->reset_res, r); 5734 return r; 5735 } 5736 5737 /** 5738 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot 5739 * 5740 * @adev: amdgpu_device pointer 5741 * 5742 * Fetches and stores in the driver the PCIE capabilities (gen speed 5743 * and lanes) of the slot the device is in. Handles APUs and 5744 * virtualized environments where PCIE config space may not be available.
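 * Unless overridden by the pcie_gen_cap/pcie_lane_cap module
 * parameters, the masks are derived below from pcie_get_speed_cap()
 * and pcie_bandwidth_available(), with default masks used for
 * devices on a root bus (e.g. APUs) outside of passthrough.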
/**
 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;

		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}
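/*
 * Illustrative sketch (not part of the driver): consumers of the masks
 * filled in above test individual CAIL_* bits, e.g. to check whether the
 * platform link can run at gen4 speeds.
 */
#if 0
static bool example_platform_supports_gen4(struct amdgpu_device *adev)
{
	return !!(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
}
#endif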
/**
 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
 *
 * @adev: amdgpu_device pointer
 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
 *
 * Return true if @peer_adev can access (DMA) @adev through the PCIe
 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
 * @peer_adev.
 */
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
				      struct amdgpu_device *peer_adev)
{
#ifdef CONFIG_HSA_AMD_P2P
	uint64_t address_mask = peer_adev->dev->dma_mask ?
		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
	resource_size_t aper_limit =
		adev->gmc.aper_base + adev->gmc.aper_size - 1;
	bool p2p_access =
		!adev->gmc.xgmi.connected_to_cpu &&
		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);

	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
		!(adev->gmc.aper_base & address_mask ||
		  aper_limit & address_mask));
#else
	return false;
#endif
}

int amdgpu_device_baco_enter(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(dev))
		return -ENOTSUPP;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}

int amdgpu_device_baco_exit(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(dev))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	if (amdgpu_passthrough(adev) &&
	    adev->nbio.funcs->clear_doorbell_interrupt)
		adev->nbio.funcs->clear_doorbell_interrupt(adev);

	return 0;
}
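/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * bracketing a low-power period with the BACO helpers above. The real
 * callers live in the driver's runtime suspend/resume paths.
 */
#if 0
static int example_baco_cycle(struct drm_device *ddev)
{
	int r = amdgpu_device_baco_enter(ddev);

	if (r)
		return r;	/* e.g. -ENOTSUPP when BACO is unavailable */

	/* ... device sits in BACO until woken ... */

	return amdgpu_device_baco_exit(ddev);
}
#endif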
/**
 * amdgpu_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		DRM_WARN("No support for XGMI hive yet...");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	adev->pci_channel_state = state;

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	/* Fatal error, prepare for slot reset */
	case pci_channel_io_frozen:
		/*
		 * Locking adev->reset_domain->sem will prevent any external access
		 * to the GPU during PCI error recovery
		 */
		amdgpu_device_lock_reset_domain(adev->reset_domain);
		amdgpu_device_set_mp1_state(adev);

		/*
		 * Block any work scheduling, as we do for a regular GPU reset,
		 * for the duration of the recovery
		 */
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, NULL);
		}
		atomic_inc(&adev->gpu_reset_counter);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{

	DRM_INFO("PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/*
	 * This is called only if amdgpu_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, so there is no need to reset the slot.
	 */

	return PCI_ERS_RESULT_RECOVERED;
}
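/*
 * Illustrative sketch (not part of this file): the PCI AER callbacks
 * above and below are wired into the driver's struct pci_driver through
 * a struct pci_error_handlers, roughly along these lines. The instance
 * name here is hypothetical; the actual hookup lives in the driver's
 * probe glue.
 */
#if 0
static const struct pci_error_handlers example_pci_err_handler = {
	.error_detected	= amdgpu_pci_error_detected,
	.mmio_enabled	= amdgpu_pci_mmio_enabled,
	.slot_reset	= amdgpu_pci_slot_reset,
	.resume		= amdgpu_pci_resume,
};
#endif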
/**
 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r, i;
	struct amdgpu_reset_context reset_context;
	u32 memsize;
	struct list_head device_list;

	DRM_INFO("PCI error: slot reset callback!!\n");

	memset(&reset_context, 0, sizeof(reset_context));

	INIT_LIST_HEAD(&device_list);
	list_add_tail(&adev->reset_list, &device_list);

	/* wait for asic to come out of reset */
	msleep(500);

	/* Restore PCI config space */
	amdgpu_device_load_pci_state(pdev);

	/* confirm ASIC came out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		memsize = amdgpu_asic_get_config_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
	if (memsize == 0xffffffff) {
		r = -ETIME;
		goto out;
	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);

	adev->no_hw_access = true;
	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
	adev->no_hw_access = false;
	if (r)
		goto out;

	r = amdgpu_do_asic_reset(&device_list, &reset_context);

out:
	if (!r) {
		if (amdgpu_device_cache_pci_state(adev->pdev))
			pci_restore_state(adev->pdev);

		DRM_INFO("PCIe error recovery succeeded\n");
	} else {
		DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
		amdgpu_device_unset_mp1_state(adev);
		amdgpu_device_unlock_reset_domain(adev->reset_domain);
	}

	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
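/*
 * Illustrative sketch (not part of the driver): the save/restore pairing
 * used by the slot-reset path above. amdgpu_device_cache_pci_state() and
 * amdgpu_device_load_pci_state() are defined further below; a hypothetical
 * caller snapshots config space while the device is healthy and replays
 * it after a reset.
 */
#if 0
static void example_pci_state_roundtrip(struct pci_dev *pdev)
{
	/* while healthy: snapshot config space into adev->pci_state */
	if (!amdgpu_device_cache_pci_state(pdev))
		return;

	/* ... slot reset happens ... */

	/* after reset: replay the cached config space */
	amdgpu_device_load_pci_state(pdev);
}
#endif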
/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it's OK to
 * resume normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");

	/* Only continue execution for the case of pci_channel_io_frozen */
	if (adev->pci_channel_state != pci_channel_io_frozen)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		drm_sched_start(&ring->sched, true);
	}

	amdgpu_device_unset_mp1_state(adev);
	amdgpu_device_unlock_reset_domain(adev->reset_domain);
}

bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);

		adev->pci_state = pci_store_saved_state(pdev);

		if (!adev->pci_state) {
			DRM_ERROR("Failed to store PCI saved state");
			return false;
		}
	} else {
		DRM_WARN("Failed to save PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);

	if (!r) {
		pci_restore_state(pdev);
	} else {
		DRM_WARN("Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	if (ring && ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);
	else
		amdgpu_asic_flush_hdp(adev, ring);
}

void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	amdgpu_asic_invalidate_hdp(adev, ring);
}

int amdgpu_in_reset(struct amdgpu_device *adev)
{
	return atomic_read(&adev->reset_domain->in_gpu_reset);
}
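/*
 * Illustrative sketch (not part of the driver): code paths that touch
 * hardware outside the reset domain typically bail out while a reset is
 * in flight, using amdgpu_in_reset() above as a cheap guard.
 */
#if 0
static int example_mmio_work(struct amdgpu_device *adev)
{
	if (amdgpu_in_reset(adev))
		return -EBUSY;	/* retry once the reset has completed */

	/* ... safe to perform register access here ... */
	return 0;
}
#endif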
/**
 * amdgpu_device_halt() - bring hardware to some kind of halt state
 *
 * @adev: amdgpu_device pointer
 *
 * Bring hardware to some kind of halt state so that no one can touch it
 * any more. This helps maintain the error context when an error occurs.
 * Compared to a simple hang, the system will stay stable, at least for SSH
 * access. It should then be trivial to inspect the hardware state and
 * see what's going on. Implemented as follows:
 *
 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc),
 *    clears all CPU mappings to the device and disallows remappings through page faults
 * 2. amdgpu_irq_disable_all() disables all interrupts
 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
 * 4. set adev->no_hw_access to avoid potential crashes after step 5
 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
 *    flush any in-flight DMA operations
 */
void amdgpu_device_halt(struct amdgpu_device *adev)
{
	struct pci_dev *pdev = adev->pdev;
	struct drm_device *ddev = adev_to_drm(adev);

	amdgpu_xcp_dev_unplug(adev);
	drm_dev_unplug(ddev);

	amdgpu_irq_disable_all(adev);

	amdgpu_fence_driver_hw_fini(adev);

	adev->no_hw_access = true;

	amdgpu_device_unmap_mmio(adev);

	pci_disable_device(pdev);
	pci_wait_for_pending_transaction(pdev);
}

u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
				 u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
				  u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
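/*
 * Illustrative sketch (not part of the driver): the two helpers above
 * implement the classic index/data register pair. A hypothetical
 * read-modify-write of a PCIe port register would look like this; the
 * mask and value parameters are made up for the example.
 */
#if 0
static void example_pcie_port_rmw(struct amdgpu_device *adev,
				  u32 reg, u32 mask, u32 value)
{
	u32 tmp = amdgpu_device_pcie_port_rreg(adev, reg);

	tmp = (tmp & ~mask) | (value & mask);
	amdgpu_device_pcie_port_wreg(adev, reg, tmp);
}
#endif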
/**
 * amdgpu_device_switch_gang - switch to a new gang
 * @adev: amdgpu_device pointer
 * @gang: the gang to switch to
 *
 * Try to switch to a new gang.
 * Returns: NULL if we switched to the new gang or a reference to the current
 * gang leader.
 */
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
					    struct dma_fence *gang)
{
	struct dma_fence *old = NULL;

	do {
		dma_fence_put(old);
		rcu_read_lock();
		old = dma_fence_get_rcu_safe(&adev->gang_submit);
		rcu_read_unlock();

		if (old == gang)
			break;

		if (!dma_fence_is_signaled(old))
			return old;

	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
			 old, gang) != old);

	dma_fence_put(old);
	return NULL;
}

bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
	case CHIP_TOPAZ:
		/* chips with no display hardware */
		return false;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* chips with display hardware */
		return true;
	default:
		/* IP discovery */
		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
			return false;
		return true;
	}
}

uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
				    uint32_t inst, uint32_t reg_addr, char reg_name[],
				    uint32_t expected_value, uint32_t mask)
{
	uint32_t ret = 0;
	uint32_t old_ = 0;
	uint32_t tmp_ = RREG32(reg_addr);
	uint32_t loop = adev->usec_timeout;

	while ((tmp_ & (mask)) != (expected_value)) {
		if (old_ != tmp_) {
			loop = adev->usec_timeout;
			old_ = tmp_;
		} else
			udelay(1);
		tmp_ = RREG32(reg_addr);
		loop--;
		if (!loop) {
			DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
				 inst, reg_name, (uint32_t)expected_value,
				 (uint32_t)(tmp_ & (mask)));
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}
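/*
 * Illustrative sketch (not part of the driver): polling a register until
 * a status bit latches, using the helper above. The register offset, name
 * and bit mask here are made up for the example.
 */
#if 0
static int example_wait_for_idle_bit(struct amdgpu_device *adev)
{
	/* wait until bit 0 of the (hypothetical) status register reads 1 */
	return amdgpu_device_wait_on_rreg(adev, 0 /* inst */,
					  0x1234 /* reg offset */,
					  "EXAMPLE_STATUS",
					  0x1 /* expected value */,
					  0x1 /* mask */);
}
#endif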