/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
119 "CYAN_SKILLFISH", 120 "NAVI14", 121 "NAVI12", 122 "SIENNA_CICHLID", 123 "NAVY_FLOUNDER", 124 "VANGOGH", 125 "DIMGREY_CAVEFISH", 126 "BEIGE_GOBY", 127 "YELLOW_CARP", 128 "LAST", 129 }; 130 131 /** 132 * DOC: pcie_replay_count 133 * 134 * The amdgpu driver provides a sysfs API for reporting the total number 135 * of PCIe replays (NAKs) 136 * The file pcie_replay_count is used for this and returns the total 137 * number of replays as a sum of the NAKs generated and NAKs received 138 */ 139 140 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev, 141 struct device_attribute *attr, char *buf) 142 { 143 struct drm_device *ddev = dev_get_drvdata(dev); 144 struct amdgpu_device *adev = drm_to_adev(ddev); 145 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev); 146 147 return sysfs_emit(buf, "%llu\n", cnt); 148 } 149 150 static DEVICE_ATTR(pcie_replay_count, S_IRUGO, 151 amdgpu_device_get_pcie_replay_count, NULL); 152 153 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); 154 155 /** 156 * DOC: product_name 157 * 158 * The amdgpu driver provides a sysfs API for reporting the product name 159 * for the device 160 * The file serial_number is used for this and returns the product name 161 * as returned from the FRU. 162 * NOTE: This is only available for certain server cards 163 */ 164 165 static ssize_t amdgpu_device_get_product_name(struct device *dev, 166 struct device_attribute *attr, char *buf) 167 { 168 struct drm_device *ddev = dev_get_drvdata(dev); 169 struct amdgpu_device *adev = drm_to_adev(ddev); 170 171 return sysfs_emit(buf, "%s\n", adev->product_name); 172 } 173 174 static DEVICE_ATTR(product_name, S_IRUGO, 175 amdgpu_device_get_product_name, NULL); 176 177 /** 178 * DOC: product_number 179 * 180 * The amdgpu driver provides a sysfs API for reporting the part number 181 * for the device 182 * The file serial_number is used for this and returns the part number 183 * as returned from the FRU. 184 * NOTE: This is only available for certain server cards 185 */ 186 187 static ssize_t amdgpu_device_get_product_number(struct device *dev, 188 struct device_attribute *attr, char *buf) 189 { 190 struct drm_device *ddev = dev_get_drvdata(dev); 191 struct amdgpu_device *adev = drm_to_adev(ddev); 192 193 return sysfs_emit(buf, "%s\n", adev->product_number); 194 } 195 196 static DEVICE_ATTR(product_number, S_IRUGO, 197 amdgpu_device_get_product_number, NULL); 198 199 /** 200 * DOC: serial_number 201 * 202 * The amdgpu driver provides a sysfs API for reporting the serial number 203 * for the device 204 * The file serial_number is used for this and returns the serial number 205 * as returned from the FRU. 206 * NOTE: This is only available for certain server cards 207 */ 208 209 static ssize_t amdgpu_device_get_serial_number(struct device *dev, 210 struct device_attribute *attr, char *buf) 211 { 212 struct drm_device *ddev = dev_get_drvdata(dev); 213 struct amdgpu_device *adev = drm_to_adev(ddev); 214 215 return sysfs_emit(buf, "%s\n", adev->serial); 216 } 217 218 static DEVICE_ATTR(serial_number, S_IRUGO, 219 amdgpu_device_get_serial_number, NULL); 220 221 /** 222 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control 223 * 224 * @dev: drm_device pointer 225 * 226 * Returns true if the device is a dGPU with ATPX power control, 227 * otherwise return false. 
/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise returns false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the size of @buf must be >= @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(&adev->ddev, &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}

/**
 * amdgpu_device_aper_access - access vram via the vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the size of @buf must be >= @size
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes that have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			mb();
			memcpy_fromio(buf, addr, count);
		}

	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the size of @buf must be >= @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try using the vram aperture to access vram first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM_INDEX/MM_DATA to access the rest of vram */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}
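/*
 * Usage sketch (illustrative; vram_offset is a placeholder, not a field
 * defined in this file): reading a single dword back from a known VRAM
 * location. Offsets and sizes must be dword aligned for the
 * MM_INDEX/MM_DATA fallback path.
 *
 *	uint32_t val;
 *
 *	amdgpu_device_vram_access(adev, vram_offset, &val, sizeof(val), false);
 */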
/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_sem))
			up_read(&adev->reset_sem);
		else
			lockdep_assert_held(&adev->reset_sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_sem)) {
			amdgpu_kiq_wreg(adev, reg, v);
			up_read(&adev->reset_sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

/*
 * amdgpu_mm_wreg_mmio_rlc - write a register either with mmio or with the
 * RLC path if in range
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 pcie_index, u32 pcie_data,
				u32 reg_addr)
{
	unsigned long flags;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 pcie_index, u32 pcie_data,
				  u32 reg_addr)
{
	unsigned long flags;
	u64 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 pcie_index, u32 pcie_data,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
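/*
 * Illustrative pairing (a sketch, not code from this file): an asic's
 * pcie_rreg callback is typically a thin wrapper around these helpers,
 * passing the asic-specific index/data register offsets. The wrapper name
 * below is a placeholder modeled on the nbio callbacks:
 *
 *	static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		u32 index = adev->nbio.funcs->get_pcie_index_offset(adev);
 *		u32 data = adev->nbio.funcs->get_pcie_data_offset(adev);
 *
 *		return amdgpu_device_indirect_rreg(adev, index, data, reg);
 *	}
 */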
/**
 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 pcie_index, u32 pcie_data,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	amdgpu_asic_pre_asic_init(adev);

	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
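/*
 * Illustrative golden register table (the register name and masks are
 * placeholders, not real golden settings): the array is a flat list of
 * {reg, and_mask, or_mask} triples, so its length must be a multiple of 3.
 *
 *	static const u32 golden_settings_example[] = {
 *		mmEXAMPLE_REG, 0x0000000f, 0x00000002,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *						ARRAY_SIZE(golden_settings_example));
 */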
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     adev->doorbell_index.max_assignment+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* For Vega, reserve and map two pages on the doorbell BAR since the
	 * SDMA paging queue doorbell uses the second page. The
	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
	 * doorbells are in the first page. So with the paging queue enabled,
	 * num_doorbells is extended by one page (0x400 dwords).
	 */
	if (adev->asic_type >= CHIP_VEGA10)
		adev->doorbell.num_doorbells += 0x400;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}
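/*
 * Usage sketch (illustrative): ring setup code typically takes a logical
 * doorbell index assigned by amdgpu_asic_init_doorbell_index() and kicks
 * the ring by writing its write pointer through the helpers above, e.g.
 *
 *	ring->doorbell_index = adev->doorbell_index.gfx_ring0;
 *	amdgpu_mm_wdoorbell(adev, ring->doorbell_index,
 *			    lower_32_bits(ring->wptr));
 */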
/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
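/*
 * Typical writeback usage sketch (illustrative): a ring allocates a slot,
 * derives the GPU address the hardware should write to, reads the value
 * back through the CPU mapping, and frees the slot on teardown.
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		u32 val = adev->wb.wb[wb];
 *
 *		...
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */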
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed because a hw reset was performed.
 * Returns true if a post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* For FIJI: in the whole-GPU pass-through virtualization case,
		 * after a VM reboot some old smc fw versions still need the
		 * driver to do a vPost, otherwise the gpu hangs. smc fw
		 * versions 22.15 and above don't have this flaw, so we force
		 * a vPost for smc versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset the whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
		bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory; a page is 4KB so we have 12 bits of offset, a minimum of
 * 9 bits in the page table, and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}
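/*
 * Worked example of the arithmetic above (illustrative): with the minimum
 * amdgpu_vm_block_size of 9, one page table covers
 * 2^9 entries * 4KB per page = 2MB of address space per page directory
 * entry; the remaining upper address bits select page directory entries.
 */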
/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return 0;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if (adev->pdev->device == 0x13FE)
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_gmc_tmz_set(adev);

	amdgpu_gmc_noretry_set(adev);

	return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Checks whether the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}
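/*
 * Usage sketch (illustrative): quiesce an IP before touching its state,
 * e.g. waiting for GFX to drain before a manual gating change:
 *
 *	if (amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX))
 *		dev_warn(adev->dev, "GFX block did not go idle\n");
 */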
/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block's version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	switch (ip_block_version->type) {
	case AMD_IP_BLOCK_TYPE_VCN:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
			return 0;
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
			return 0;
		break;
	default:
		break;
	}

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
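/*
 * Usage sketch for amdgpu_device_ip_block_version_cmp() above (illustrative;
 * the version numbers and helper name are placeholders): gate a code path
 * on a minimum IP version. The helper returns 0 when the installed version
 * qualifies.
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						9, 1))
 *		use_gfx_9_1_feature();	(placeholder function)
 */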
/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		const char *pci_address_name = pci_name(adev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}
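/*
 * Example parameter strings (illustrative, derived from the parsing above):
 * entries are separated by ';' and each entry is a PCI address optionally
 * followed by ',<num_crtc>', or the keyword "all":
 *
 *	amdgpu.virtual_display=all,2
 *	amdgpu.virtual_display=0000:26:00.0,4;0000:26:00.1
 */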
/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[40];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	if (adev->mman.discovery_bin) {
		amdgpu_discovery_get_gfx_info(adev);

		/*
		 * FIXME: The bounding box is still needed by Navi12, so
		 * temporarily read it from gpu_info firmware. Should be dropped
		 * when DAL no longer needs it.
		 */
		if (adev->asic_type != CHIP_NAVI12)
			return 0;
	}

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGA20:
	case CHIP_ALDEBARAN:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			chip_name = "raven2";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_RENOIR:
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			chip_name = "renoir";
		else
			chip_name = "green_sardine";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	case CHIP_VANGOGH:
		chip_name = "vangogh";
		break;
	case CHIP_YELLOW_CARP:
		chip_name = "yellow_carp";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		/*
		 * Should be dropped when DAL no longer needs it.
		 */
2005 */ 2006 if (adev->asic_type == CHIP_NAVI12) 2007 goto parse_soc_bounding_box; 2008 2009 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); 2010 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); 2011 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); 2012 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se); 2013 adev->gfx.config.max_texture_channel_caches = 2014 le32_to_cpu(gpu_info_fw->gc_num_tccs); 2015 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs); 2016 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds); 2017 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth); 2018 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth); 2019 adev->gfx.config.double_offchip_lds_buf = 2020 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer); 2021 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size); 2022 adev->gfx.cu_info.max_waves_per_simd = 2023 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd); 2024 adev->gfx.cu_info.max_scratch_slots_per_cu = 2025 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu); 2026 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); 2027 if (hdr->version_minor >= 1) { 2028 const struct gpu_info_firmware_v1_1 *gpu_info_fw = 2029 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data + 2030 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2031 adev->gfx.config.num_sc_per_sh = 2032 le32_to_cpu(gpu_info_fw->num_sc_per_sh); 2033 adev->gfx.config.num_packer_per_sc = 2034 le32_to_cpu(gpu_info_fw->num_packer_per_sc); 2035 } 2036 2037 parse_soc_bounding_box: 2038 /* 2039 * soc bounding box info is not integrated in the discovery table, so 2040 * we always need to parse it from the gpu_info firmware when needed. 2041 */ 2042 if (hdr->version_minor == 2) { 2043 const struct gpu_info_firmware_v1_2 *gpu_info_fw = 2044 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data + 2045 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2046 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box; 2047 } 2048 break; 2049 } 2050 default: 2051 dev_err(adev->dev, 2052 "Unsupported gpu_info table %d\n", hdr->header.ucode_version); 2053 err = -EINVAL; 2054 goto out; 2055 } 2056 out: 2057 return err; 2058 } 2059 2060 /** 2061 * amdgpu_device_ip_early_init - run early init for hardware IPs 2062 * 2063 * @adev: amdgpu_device pointer 2064 * 2065 * Early initialization pass for hardware IPs. The hardware IPs that make 2066 * up each asic are discovered and each IP's early_init callback is run. This 2067 * is the first stage in initializing the asic. 2068 * Returns 0 on success, negative error code on failure.
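*
* Roughly, the IP-block lifecycle driven by this file is:
* early_init -> sw_init -> hw_init -> late_init on the way up, and
* hw_fini -> sw_fini -> late_fini on the way down, with the suspend and
* resume callbacks used across S3/S4 and GPU reset.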
2069 */ 2070 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) 2071 { 2072 int i, r; 2073 2074 amdgpu_device_enable_virtual_display(adev); 2075 2076 if (amdgpu_sriov_vf(adev)) { 2077 r = amdgpu_virt_request_full_gpu(adev, true); 2078 if (r) 2079 return r; 2080 } 2081 2082 switch (adev->asic_type) { 2083 #ifdef CONFIG_DRM_AMDGPU_SI 2084 case CHIP_VERDE: 2085 case CHIP_TAHITI: 2086 case CHIP_PITCAIRN: 2087 case CHIP_OLAND: 2088 case CHIP_HAINAN: 2089 adev->family = AMDGPU_FAMILY_SI; 2090 r = si_set_ip_blocks(adev); 2091 if (r) 2092 return r; 2093 break; 2094 #endif 2095 #ifdef CONFIG_DRM_AMDGPU_CIK 2096 case CHIP_BONAIRE: 2097 case CHIP_HAWAII: 2098 case CHIP_KAVERI: 2099 case CHIP_KABINI: 2100 case CHIP_MULLINS: 2101 if (adev->flags & AMD_IS_APU) 2102 adev->family = AMDGPU_FAMILY_KV; 2103 else 2104 adev->family = AMDGPU_FAMILY_CI; 2105 2106 r = cik_set_ip_blocks(adev); 2107 if (r) 2108 return r; 2109 break; 2110 #endif 2111 case CHIP_TOPAZ: 2112 case CHIP_TONGA: 2113 case CHIP_FIJI: 2114 case CHIP_POLARIS10: 2115 case CHIP_POLARIS11: 2116 case CHIP_POLARIS12: 2117 case CHIP_VEGAM: 2118 case CHIP_CARRIZO: 2119 case CHIP_STONEY: 2120 if (adev->flags & AMD_IS_APU) 2121 adev->family = AMDGPU_FAMILY_CZ; 2122 else 2123 adev->family = AMDGPU_FAMILY_VI; 2124 2125 r = vi_set_ip_blocks(adev); 2126 if (r) 2127 return r; 2128 break; 2129 case CHIP_VEGA10: 2130 case CHIP_VEGA12: 2131 case CHIP_VEGA20: 2132 case CHIP_RAVEN: 2133 case CHIP_ARCTURUS: 2134 case CHIP_RENOIR: 2135 case CHIP_ALDEBARAN: 2136 if (adev->flags & AMD_IS_APU) 2137 adev->family = AMDGPU_FAMILY_RV; 2138 else 2139 adev->family = AMDGPU_FAMILY_AI; 2140 2141 r = soc15_set_ip_blocks(adev); 2142 if (r) 2143 return r; 2144 break; 2145 case CHIP_NAVI10: 2146 case CHIP_NAVI14: 2147 case CHIP_NAVI12: 2148 case CHIP_SIENNA_CICHLID: 2149 case CHIP_NAVY_FLOUNDER: 2150 case CHIP_DIMGREY_CAVEFISH: 2151 case CHIP_BEIGE_GOBY: 2152 case CHIP_VANGOGH: 2153 case CHIP_YELLOW_CARP: 2154 case CHIP_CYAN_SKILLFISH: 2155 if (adev->asic_type == CHIP_VANGOGH) 2156 adev->family = AMDGPU_FAMILY_VGH; 2157 else if (adev->asic_type == CHIP_YELLOW_CARP) 2158 adev->family = AMDGPU_FAMILY_YC; 2159 else 2160 adev->family = AMDGPU_FAMILY_NV; 2161 2162 r = nv_set_ip_blocks(adev); 2163 if (r) 2164 return r; 2165 break; 2166 default: 2167 /* FIXME: not supported yet */ 2168 return -EINVAL; 2169 } 2170 2171 amdgpu_amdkfd_device_probe(adev); 2172 2173 adev->pm.pp_feature = amdgpu_pp_feature_mask; 2174 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS) 2175 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 2176 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) 2177 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; 2178 2179 for (i = 0; i < adev->num_ip_blocks; i++) { 2180 if ((amdgpu_ip_block_mask & (1 << i)) == 0) { 2181 DRM_ERROR("disabled ip block: %d <%s>\n", 2182 i, adev->ip_blocks[i].version->funcs->name); 2183 adev->ip_blocks[i].status.valid = false; 2184 } else { 2185 if (adev->ip_blocks[i].version->funcs->early_init) { 2186 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); 2187 if (r == -ENOENT) { 2188 adev->ip_blocks[i].status.valid = false; 2189 } else if (r) { 2190 DRM_ERROR("early_init of IP block <%s> failed %d\n", 2191 adev->ip_blocks[i].version->funcs->name, r); 2192 return r; 2193 } else { 2194 adev->ip_blocks[i].status.valid = true; 2195 } 2196 } else { 2197 adev->ip_blocks[i].status.valid = true; 2198 } 2199 } 2200 /* get the vbios after the asic_funcs are set up */ 2201 if 
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { 2202 r = amdgpu_device_parse_gpu_info_fw(adev); 2203 if (r) 2204 return r; 2205 2206 /* Read BIOS */ 2207 if (!amdgpu_get_bios(adev)) 2208 return -EINVAL; 2209 2210 r = amdgpu_atombios_init(adev); 2211 if (r) { 2212 dev_err(adev->dev, "amdgpu_atombios_init failed\n"); 2213 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); 2214 return r; 2215 } 2216 2217 /*get pf2vf msg info at it's earliest time*/ 2218 if (amdgpu_sriov_vf(adev)) 2219 amdgpu_virt_init_data_exchange(adev); 2220 2221 } 2222 } 2223 2224 adev->cg_flags &= amdgpu_cg_mask; 2225 adev->pg_flags &= amdgpu_pg_mask; 2226 2227 return 0; 2228 } 2229 2230 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev) 2231 { 2232 int i, r; 2233 2234 for (i = 0; i < adev->num_ip_blocks; i++) { 2235 if (!adev->ip_blocks[i].status.sw) 2236 continue; 2237 if (adev->ip_blocks[i].status.hw) 2238 continue; 2239 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 2240 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || 2241 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { 2242 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 2243 if (r) { 2244 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2245 adev->ip_blocks[i].version->funcs->name, r); 2246 return r; 2247 } 2248 adev->ip_blocks[i].status.hw = true; 2249 } 2250 } 2251 2252 return 0; 2253 } 2254 2255 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev) 2256 { 2257 int i, r; 2258 2259 for (i = 0; i < adev->num_ip_blocks; i++) { 2260 if (!adev->ip_blocks[i].status.sw) 2261 continue; 2262 if (adev->ip_blocks[i].status.hw) 2263 continue; 2264 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 2265 if (r) { 2266 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2267 adev->ip_blocks[i].version->funcs->name, r); 2268 return r; 2269 } 2270 adev->ip_blocks[i].status.hw = true; 2271 } 2272 2273 return 0; 2274 } 2275 2276 static int amdgpu_device_fw_loading(struct amdgpu_device *adev) 2277 { 2278 int r = 0; 2279 int i; 2280 uint32_t smu_version; 2281 2282 if (adev->asic_type >= CHIP_VEGA10) { 2283 for (i = 0; i < adev->num_ip_blocks; i++) { 2284 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) 2285 continue; 2286 2287 if (!adev->ip_blocks[i].status.sw) 2288 continue; 2289 2290 /* no need to do the fw loading again if already done*/ 2291 if (adev->ip_blocks[i].status.hw == true) 2292 break; 2293 2294 if (amdgpu_in_reset(adev) || adev->in_suspend) { 2295 r = adev->ip_blocks[i].version->funcs->resume(adev); 2296 if (r) { 2297 DRM_ERROR("resume of IP block <%s> failed %d\n", 2298 adev->ip_blocks[i].version->funcs->name, r); 2299 return r; 2300 } 2301 } else { 2302 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 2303 if (r) { 2304 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2305 adev->ip_blocks[i].version->funcs->name, r); 2306 return r; 2307 } 2308 } 2309 2310 adev->ip_blocks[i].status.hw = true; 2311 break; 2312 } 2313 } 2314 2315 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) 2316 r = amdgpu_pm_load_smu_firmware(adev, &smu_version); 2317 2318 return r; 2319 } 2320 2321 /** 2322 * amdgpu_device_ip_init - run init for hardware IPs 2323 * 2324 * @adev: amdgpu_device pointer 2325 * 2326 * Main initialization pass for hardware IPs. The list of all the hardware 2327 * IPs that make up the asic is walked and the sw_init and hw_init callbacks 2328 * are run. 
sw_init initializes the software state associated with each IP 2329 * and hw_init initializes the hardware associated with each IP. 2330 * Returns 0 on success, negative error code on failure. 2331 */ 2332 static int amdgpu_device_ip_init(struct amdgpu_device *adev) 2333 { 2334 int i, r; 2335 2336 r = amdgpu_ras_init(adev); 2337 if (r) 2338 return r; 2339 2340 for (i = 0; i < adev->num_ip_blocks; i++) { 2341 if (!adev->ip_blocks[i].status.valid) 2342 continue; 2343 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); 2344 if (r) { 2345 DRM_ERROR("sw_init of IP block <%s> failed %d\n", 2346 adev->ip_blocks[i].version->funcs->name, r); 2347 goto init_failed; 2348 } 2349 adev->ip_blocks[i].status.sw = true; 2350 2351 /* need to do gmc hw init early so we can allocate gpu mem */ 2352 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 2353 r = amdgpu_device_vram_scratch_init(adev); 2354 if (r) { 2355 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); 2356 goto init_failed; 2357 } 2358 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); 2359 if (r) { 2360 DRM_ERROR("hw_init %d failed %d\n", i, r); 2361 goto init_failed; 2362 } 2363 r = amdgpu_device_wb_init(adev); 2364 if (r) { 2365 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r); 2366 goto init_failed; 2367 } 2368 adev->ip_blocks[i].status.hw = true; 2369 2370 /* right after GMC hw init, we create CSA */ 2371 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { 2372 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, 2373 AMDGPU_GEM_DOMAIN_VRAM, 2374 AMDGPU_CSA_SIZE); 2375 if (r) { 2376 DRM_ERROR("allocate CSA failed %d\n", r); 2377 goto init_failed; 2378 } 2379 } 2380 } 2381 } 2382 2383 if (amdgpu_sriov_vf(adev)) 2384 amdgpu_virt_init_data_exchange(adev); 2385 2386 r = amdgpu_ib_pool_init(adev); 2387 if (r) { 2388 dev_err(adev->dev, "IB initialization failed (%d).\n", r); 2389 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); 2390 goto init_failed; 2391 } 2392 2393 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */ 2394 if (r) 2395 goto init_failed; 2396 2397 r = amdgpu_amdkfd_resume_iommu(adev); 2398 if (r) 2399 goto init_failed; 2400 2401 r = amdgpu_device_ip_hw_init_phase1(adev); 2402 if (r) 2403 goto init_failed; 2404 2405 r = amdgpu_device_fw_loading(adev); 2406 if (r) 2407 goto init_failed; 2408 2409 r = amdgpu_device_ip_hw_init_phase2(adev); 2410 if (r) 2411 goto init_failed; 2412 2413 /* 2414 * Retired pages will be loaded from eeprom and reserved here; 2415 * this must be called after amdgpu_device_ip_hw_init_phase2 since 2416 * for some ASICs the RAS EEPROM code relies on the SMU being fully 2417 * functional for I2C communication, which is only true at this point. 2418 * 2419 * amdgpu_ras_recovery_init may fail, but the upper layer only cares 2420 * about failures caused by a bad GPU state and stops the amdgpu init 2421 * process accordingly. For other failure cases it still releases all 2422 * the resources and prints an error message rather than returning a 2423 * negative value to the upper level.
2424 * 2425 * Note: theoretically, this should be called before all vram allocations 2426 * to protect retired pages from being abused 2427 */ 2428 r = amdgpu_ras_recovery_init(adev); 2429 if (r) 2430 goto init_failed; 2431 2432 if (adev->gmc.xgmi.num_physical_nodes > 1) 2433 amdgpu_xgmi_add_device(adev); 2434 2435 /* Don't init kfd if whole hive need to be reset during init */ 2436 if (!adev->gmc.xgmi.pending_reset) 2437 amdgpu_amdkfd_device_init(adev); 2438 2439 amdgpu_fru_get_product_info(adev); 2440 2441 init_failed: 2442 if (amdgpu_sriov_vf(adev)) 2443 amdgpu_virt_release_full_gpu(adev, true); 2444 2445 return r; 2446 } 2447 2448 /** 2449 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer 2450 * 2451 * @adev: amdgpu_device pointer 2452 * 2453 * Writes a reset magic value to the gart pointer in VRAM. The driver calls 2454 * this function before a GPU reset. If the value is retained after a 2455 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents. 2456 */ 2457 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev) 2458 { 2459 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); 2460 } 2461 2462 /** 2463 * amdgpu_device_check_vram_lost - check if vram is valid 2464 * 2465 * @adev: amdgpu_device pointer 2466 * 2467 * Checks the reset magic value written to the gart pointer in VRAM. 2468 * The driver calls this after a GPU reset to see if the contents of 2469 * VRAM are lost or not. 2470 * Returns true if vram is lost, false if not. 2471 */ 2472 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) 2473 { 2474 if (memcmp(adev->gart.ptr, adev->reset_magic, 2475 AMDGPU_RESET_MAGIC_NUM)) 2476 return true; 2477 2478 if (!amdgpu_in_reset(adev)) 2479 return false; 2480 2481 /* 2482 * For all ASICs with baco/mode1 reset, the VRAM is 2483 * always assumed to be lost. 2484 */ 2485 switch (amdgpu_asic_reset_method(adev)) { 2486 case AMD_RESET_METHOD_BACO: 2487 case AMD_RESET_METHOD_MODE1: 2488 return true; 2489 default: 2490 return false; 2491 } 2492 } 2493 2494 /** 2495 * amdgpu_device_set_cg_state - set clockgating for amdgpu device 2496 * 2497 * @adev: amdgpu_device pointer 2498 * @state: clockgating state (gate or ungate) 2499 * 2500 * The list of all the hardware IPs that make up the asic is walked and the 2501 * set_clockgating_state callbacks are run. During late init this pass 2502 * enables clockgating for hardware IPs; during fini or suspend it 2503 * disables clockgating. 2504 * Returns 0 on success, negative error code on failure. 2505 */ 2506 2507 int amdgpu_device_set_cg_state(struct amdgpu_device *adev, 2508 enum amd_clockgating_state state) 2509 { 2510 int i, j, r; 2511 2512 if (amdgpu_emu_mode == 1) 2513 return 0; 2514 2515 for (j = 0; j < adev->num_ip_blocks; j++) { 2516 i = state == AMD_CG_STATE_GATE ?
j : adev->num_ip_blocks - j - 1; 2517 if (!adev->ip_blocks[i].status.late_initialized) 2518 continue; 2519 /* skip CG for GFX on S0ix */ 2520 if (adev->in_s0ix && 2521 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) 2522 continue; 2523 /* skip CG for VCE/UVD, it's handled specially */ 2524 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 2525 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && 2526 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && 2527 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && 2528 adev->ip_blocks[i].version->funcs->set_clockgating_state) { 2529 /* enable clockgating to save power */ 2530 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 2531 state); 2532 if (r) { 2533 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", 2534 adev->ip_blocks[i].version->funcs->name, r); 2535 return r; 2536 } 2537 } 2538 } 2539 2540 return 0; 2541 } 2542 2543 int amdgpu_device_set_pg_state(struct amdgpu_device *adev, 2544 enum amd_powergating_state state) 2545 { 2546 int i, j, r; 2547 2548 if (amdgpu_emu_mode == 1) 2549 return 0; 2550 2551 for (j = 0; j < adev->num_ip_blocks; j++) { 2552 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 2553 if (!adev->ip_blocks[i].status.late_initialized) 2554 continue; 2555 /* skip PG for GFX on S0ix */ 2556 if (adev->in_s0ix && 2557 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) 2558 continue; 2559 /* skip PG for VCE/UVD, it's handled specially */ 2560 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 2561 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && 2562 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && 2563 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && 2564 adev->ip_blocks[i].version->funcs->set_powergating_state) { 2565 /* enable powergating to save power */ 2566 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev, 2567 state); 2568 if (r) { 2569 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n", 2570 adev->ip_blocks[i].version->funcs->name, r); 2571 return r; 2572 } 2573 } 2574 } 2575 return 0; 2576 } 2577 2578 static int amdgpu_device_enable_mgpu_fan_boost(void) 2579 { 2580 struct amdgpu_gpu_instance *gpu_ins; 2581 struct amdgpu_device *adev; 2582 int i, ret = 0; 2583 2584 mutex_lock(&mgpu_info.mutex); 2585 2586 /* 2587 * MGPU fan boost feature should be enabled 2588 * only when there are two or more dGPUs in 2589 * the system 2590 */ 2591 if (mgpu_info.num_dgpu < 2) 2592 goto out; 2593 2594 for (i = 0; i < mgpu_info.num_dgpu; i++) { 2595 gpu_ins = &(mgpu_info.gpu_ins[i]); 2596 adev = gpu_ins->adev; 2597 if (!(adev->flags & AMD_IS_APU) && 2598 !gpu_ins->mgpu_fan_enabled) { 2599 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev); 2600 if (ret) 2601 break; 2602 2603 gpu_ins->mgpu_fan_enabled = 1; 2604 } 2605 } 2606 2607 out: 2608 mutex_unlock(&mgpu_info.mutex); 2609 2610 return ret; 2611 } 2612 2613 /** 2614 * amdgpu_device_ip_late_init - run late init for hardware IPs 2615 * 2616 * @adev: amdgpu_device pointer 2617 * 2618 * Late initialization pass for hardware IPs. The list of all the hardware 2619 * IPs that make up the asic is walked and the late_init callbacks are run. 2620 * late_init covers any special initialization that an IP requires 2621 * after all of them have been initialized or something that needs to happen 2622 * late in the init process. 2623 * Returns 0 on success, negative error code on failure.
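* Besides running the per-IP late_init callbacks, this pass also enables
* clockgating/powergating, records the VRAM reset magic and enables the
* mgpu fan boost where applicable (see the body below).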
2624 */ 2625 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) 2626 { 2627 struct amdgpu_gpu_instance *gpu_instance; 2628 int i = 0, r; 2629 2630 for (i = 0; i < adev->num_ip_blocks; i++) { 2631 if (!adev->ip_blocks[i].status.hw) 2632 continue; 2633 if (adev->ip_blocks[i].version->funcs->late_init) { 2634 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); 2635 if (r) { 2636 DRM_ERROR("late_init of IP block <%s> failed %d\n", 2637 adev->ip_blocks[i].version->funcs->name, r); 2638 return r; 2639 } 2640 } 2641 adev->ip_blocks[i].status.late_initialized = true; 2642 } 2643 2644 amdgpu_ras_set_error_query_ready(adev, true); 2645 2646 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); 2647 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); 2648 2649 amdgpu_device_fill_reset_magic(adev); 2650 2651 r = amdgpu_device_enable_mgpu_fan_boost(); 2652 if (r) 2653 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); 2654 2655 /* For XGMI + passthrough configuration on arcturus, enable light SBR */ 2656 if (adev->asic_type == CHIP_ARCTURUS && 2657 amdgpu_passthrough(adev) && 2658 adev->gmc.xgmi.num_physical_nodes > 1) 2659 smu_set_light_sbr(&adev->smu, true); 2660 2661 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2662 mutex_lock(&mgpu_info.mutex); 2663 2664 /* 2665 * Reset device p-state to low as this was booted with high. 2666 * 2667 * This should be performed only after all devices from the same 2668 * hive get initialized. 2669 * 2670 * However, the number of devices in a hive is not known in advance; 2671 * it is counted one by one as the devices initialize. 2672 * 2673 * So we wait until all XGMI interlinked devices are initialized. 2674 * This may add some delay, as those devices may come from 2675 * different hives. But that should be OK.
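* (mgpu_info.num_dgpu grows as each device registers during init, which is
* why the equality check below only fires once the whole hive is up.)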
2676 */ 2677 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { 2678 for (i = 0; i < mgpu_info.num_gpu; i++) { 2679 gpu_instance = &(mgpu_info.gpu_ins[i]); 2680 if (gpu_instance->adev->flags & AMD_IS_APU) 2681 continue; 2682 2683 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 2684 AMDGPU_XGMI_PSTATE_MIN); 2685 if (r) { 2686 DRM_ERROR("pstate setting failed (%d).\n", r); 2687 break; 2688 } 2689 } 2690 } 2691 2692 mutex_unlock(&mgpu_info.mutex); 2693 } 2694 2695 return 0; 2696 } 2697 2698 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) 2699 { 2700 int i, r; 2701 2702 for (i = 0; i < adev->num_ip_blocks; i++) { 2703 if (!adev->ip_blocks[i].version->funcs->early_fini) 2704 continue; 2705 2706 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev); 2707 if (r) { 2708 DRM_DEBUG("early_fini of IP block <%s> failed %d\n", 2709 adev->ip_blocks[i].version->funcs->name, r); 2710 } 2711 } 2712 2713 amdgpu_amdkfd_suspend(adev, false); 2714 2715 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2716 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2717 2718 /* need to disable SMC first */ 2719 for (i = 0; i < adev->num_ip_blocks; i++) { 2720 if (!adev->ip_blocks[i].status.hw) 2721 continue; 2722 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 2723 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 2724 /* XXX handle errors */ 2725 if (r) { 2726 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 2727 adev->ip_blocks[i].version->funcs->name, r); 2728 } 2729 adev->ip_blocks[i].status.hw = false; 2730 break; 2731 } 2732 } 2733 2734 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2735 if (!adev->ip_blocks[i].status.hw) 2736 continue; 2737 2738 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 2739 /* XXX handle errors */ 2740 if (r) { 2741 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 2742 adev->ip_blocks[i].version->funcs->name, r); 2743 } 2744 2745 adev->ip_blocks[i].status.hw = false; 2746 } 2747 2748 if (amdgpu_sriov_vf(adev)) { 2749 if (amdgpu_virt_release_full_gpu(adev, false)) 2750 DRM_ERROR("failed to release exclusive mode on fini\n"); 2751 } 2752 2753 return 0; 2754 } 2755 2756 /** 2757 * amdgpu_device_ip_fini - run fini for hardware IPs 2758 * 2759 * @adev: amdgpu_device pointer 2760 * 2761 * Main teardown pass for hardware IPs. The list of all the hardware 2762 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks 2763 * are run. hw_fini tears down the hardware associated with each IP 2764 * and sw_fini tears down any software state associated with each IP. 2765 * Returns 0 on success, negative error code on failure. 
2766 */ 2767 static int amdgpu_device_ip_fini(struct amdgpu_device *adev) 2768 { 2769 int i, r; 2770 2771 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) 2772 amdgpu_virt_release_ras_err_handler_data(adev); 2773 2774 amdgpu_ras_pre_fini(adev); 2775 2776 if (adev->gmc.xgmi.num_physical_nodes > 1) 2777 amdgpu_xgmi_remove_device(adev); 2778 2779 amdgpu_amdkfd_device_fini_sw(adev); 2780 2781 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2782 if (!adev->ip_blocks[i].status.sw) 2783 continue; 2784 2785 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 2786 amdgpu_ucode_free_bo(adev); 2787 amdgpu_free_static_csa(&adev->virt.csa_obj); 2788 amdgpu_device_wb_fini(adev); 2789 amdgpu_device_vram_scratch_fini(adev); 2790 amdgpu_ib_pool_fini(adev); 2791 } 2792 2793 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); 2794 /* XXX handle errors */ 2795 if (r) { 2796 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", 2797 adev->ip_blocks[i].version->funcs->name, r); 2798 } 2799 adev->ip_blocks[i].status.sw = false; 2800 adev->ip_blocks[i].status.valid = false; 2801 } 2802 2803 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2804 if (!adev->ip_blocks[i].status.late_initialized) 2805 continue; 2806 if (adev->ip_blocks[i].version->funcs->late_fini) 2807 adev->ip_blocks[i].version->funcs->late_fini((void *)adev); 2808 adev->ip_blocks[i].status.late_initialized = false; 2809 } 2810 2811 amdgpu_ras_fini(adev); 2812 2813 return 0; 2814 } 2815 2816 /** 2817 * amdgpu_device_delayed_init_work_handler - work handler for IB tests 2818 * 2819 * @work: work_struct. 2820 */ 2821 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work) 2822 { 2823 struct amdgpu_device *adev = 2824 container_of(work, struct amdgpu_device, delayed_init_work.work); 2825 int r; 2826 2827 r = amdgpu_ib_ring_tests(adev); 2828 if (r) 2829 DRM_ERROR("ib ring test failed (%d).\n", r); 2830 } 2831 2832 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) 2833 { 2834 struct amdgpu_device *adev = 2835 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); 2836 2837 WARN_ON_ONCE(adev->gfx.gfx_off_state); 2838 WARN_ON_ONCE(adev->gfx.gfx_off_req_count); 2839 2840 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) 2841 adev->gfx.gfx_off_state = true; 2842 } 2843 2844 /** 2845 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1) 2846 * 2847 * @adev: amdgpu_device pointer 2848 * 2849 * Main suspend function for hardware IPs. The list of all the hardware 2850 * IPs that make up the asic is walked, clockgating is disabled and the 2851 * suspend callbacks are run. suspend puts the hardware and software state 2852 * in each IP into a state suitable for suspend. 2853 * Returns 0 on success, negative error code on failure. 
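* Phase 1 suspends only the display (DCE) hardware; every other IP block is
* left running and is suspended later in phase 2.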
2854 */ 2855 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) 2856 { 2857 int i, r; 2858 2859 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2860 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2861 2862 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2863 if (!adev->ip_blocks[i].status.valid) 2864 continue; 2865 2866 /* displays are handled separately */ 2867 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE) 2868 continue; 2869 2870 /* XXX handle errors */ 2871 r = adev->ip_blocks[i].version->funcs->suspend(adev); 2872 /* XXX handle errors */ 2873 if (r) { 2874 DRM_ERROR("suspend of IP block <%s> failed %d\n", 2875 adev->ip_blocks[i].version->funcs->name, r); 2876 return r; 2877 } 2878 2879 adev->ip_blocks[i].status.hw = false; 2880 } 2881 2882 return 0; 2883 } 2884 2885 /** 2886 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2) 2887 * 2888 * @adev: amdgpu_device pointer 2889 * 2890 * Main suspend function for hardware IPs. The list of all the hardware 2891 * IPs that make up the asic is walked, clockgating is disabled and the 2892 * suspend callbacks are run. suspend puts the hardware and software state 2893 * in each IP into a state suitable for suspend. 2894 * Returns 0 on success, negative error code on failure. 2895 */ 2896 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) 2897 { 2898 int i, r; 2899 2900 if (adev->in_s0ix) 2901 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry); 2902 2903 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2904 if (!adev->ip_blocks[i].status.valid) 2905 continue; 2906 /* displays are handled in phase1 */ 2907 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) 2908 continue; 2909 /* PSP lost connection when err_event_athub occurs */ 2910 if (amdgpu_ras_intr_triggered() && 2911 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 2912 adev->ip_blocks[i].status.hw = false; 2913 continue; 2914 } 2915 2916 /* skip unnecessary suspend if we do not initialize them yet */ 2917 if (adev->gmc.xgmi.pending_reset && 2918 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 2919 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC || 2920 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 2921 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) { 2922 adev->ip_blocks[i].status.hw = false; 2923 continue; 2924 } 2925 2926 /* skip suspend of gfx and psp for S0ix 2927 * gfx is in gfxoff state, so on resume it will exit gfxoff just 2928 * like at runtime. PSP is also part of the always on hardware 2929 * so no need to suspend it. 
2930 */ 2931 if (adev->in_s0ix && 2932 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || 2933 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)) 2934 continue; 2935 2936 /* XXX handle errors */ 2937 r = adev->ip_blocks[i].version->funcs->suspend(adev); 2938 /* XXX handle errors */ 2939 if (r) { 2940 DRM_ERROR("suspend of IP block <%s> failed %d\n", 2941 adev->ip_blocks[i].version->funcs->name, r); 2942 } 2943 adev->ip_blocks[i].status.hw = false; 2944 /* handle putting the SMC in the appropriate state */ 2945 if(!amdgpu_sriov_vf(adev)){ 2946 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 2947 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); 2948 if (r) { 2949 DRM_ERROR("SMC failed to set mp1 state %d, %d\n", 2950 adev->mp1_state, r); 2951 return r; 2952 } 2953 } 2954 } 2955 } 2956 2957 return 0; 2958 } 2959 2960 /** 2961 * amdgpu_device_ip_suspend - run suspend for hardware IPs 2962 * 2963 * @adev: amdgpu_device pointer 2964 * 2965 * Main suspend function for hardware IPs. The list of all the hardware 2966 * IPs that make up the asic is walked, clockgating is disabled and the 2967 * suspend callbacks are run. suspend puts the hardware and software state 2968 * in each IP into a state suitable for suspend. 2969 * Returns 0 on success, negative error code on failure. 2970 */ 2971 int amdgpu_device_ip_suspend(struct amdgpu_device *adev) 2972 { 2973 int r; 2974 2975 if (amdgpu_sriov_vf(adev)) { 2976 amdgpu_virt_fini_data_exchange(adev); 2977 amdgpu_virt_request_full_gpu(adev, false); 2978 } 2979 2980 r = amdgpu_device_ip_suspend_phase1(adev); 2981 if (r) 2982 return r; 2983 r = amdgpu_device_ip_suspend_phase2(adev); 2984 2985 if (amdgpu_sriov_vf(adev)) 2986 amdgpu_virt_release_full_gpu(adev, false); 2987 2988 return r; 2989 } 2990 2991 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) 2992 { 2993 int i, r; 2994 2995 static enum amd_ip_block_type ip_order[] = { 2996 AMD_IP_BLOCK_TYPE_GMC, 2997 AMD_IP_BLOCK_TYPE_COMMON, 2998 AMD_IP_BLOCK_TYPE_PSP, 2999 AMD_IP_BLOCK_TYPE_IH, 3000 }; 3001 3002 for (i = 0; i < adev->num_ip_blocks; i++) { 3003 int j; 3004 struct amdgpu_ip_block *block; 3005 3006 block = &adev->ip_blocks[i]; 3007 block->status.hw = false; 3008 3009 for (j = 0; j < ARRAY_SIZE(ip_order); j++) { 3010 3011 if (block->version->type != ip_order[j] || 3012 !block->status.valid) 3013 continue; 3014 3015 r = block->version->funcs->hw_init(adev); 3016 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); 3017 if (r) 3018 return r; 3019 block->status.hw = true; 3020 } 3021 } 3022 3023 return 0; 3024 } 3025 3026 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) 3027 { 3028 int i, r; 3029 3030 static enum amd_ip_block_type ip_order[] = { 3031 AMD_IP_BLOCK_TYPE_SMC, 3032 AMD_IP_BLOCK_TYPE_DCE, 3033 AMD_IP_BLOCK_TYPE_GFX, 3034 AMD_IP_BLOCK_TYPE_SDMA, 3035 AMD_IP_BLOCK_TYPE_UVD, 3036 AMD_IP_BLOCK_TYPE_VCE, 3037 AMD_IP_BLOCK_TYPE_VCN 3038 }; 3039 3040 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 3041 int j; 3042 struct amdgpu_ip_block *block; 3043 3044 for (j = 0; j < adev->num_ip_blocks; j++) { 3045 block = &adev->ip_blocks[j]; 3046 3047 if (block->version->type != ip_order[i] || 3048 !block->status.valid || 3049 block->status.hw) 3050 continue; 3051 3052 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) 3053 r = block->version->funcs->resume(adev); 3054 else 3055 r = block->version->funcs->hw_init(adev); 3056 3057 DRM_INFO("RE-INIT-late: %s %s\n", 
block->version->funcs->name, r?"failed":"succeeded"); 3058 if (r) 3059 return r; 3060 block->status.hw = true; 3061 } 3062 } 3063 3064 return 0; 3065 } 3066 3067 /** 3068 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs 3069 * 3070 * @adev: amdgpu_device pointer 3071 * 3072 * First resume function for hardware IPs. The list of all the hardware 3073 * IPs that make up the asic is walked and the resume callbacks are run for 3074 * COMMON, GMC, and IH. resume puts the hardware into a functional state 3075 * after a suspend and updates the software state as necessary. This 3076 * function is also used for restoring the GPU after a GPU reset. 3077 * Returns 0 on success, negative error code on failure. 3078 */ 3079 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) 3080 { 3081 int i, r; 3082 3083 for (i = 0; i < adev->num_ip_blocks; i++) { 3084 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) 3085 continue; 3086 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3087 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3088 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { 3089 3090 r = adev->ip_blocks[i].version->funcs->resume(adev); 3091 if (r) { 3092 DRM_ERROR("resume of IP block <%s> failed %d\n", 3093 adev->ip_blocks[i].version->funcs->name, r); 3094 return r; 3095 } 3096 adev->ip_blocks[i].status.hw = true; 3097 } 3098 } 3099 3100 return 0; 3101 } 3102 3103 /** 3104 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs 3105 * 3106 * @adev: amdgpu_device pointer 3107 * 3108 * Second resume function for hardware IPs. The list of all the hardware 3109 * IPs that make up the asic is walked and the resume callbacks are run for 3110 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a 3111 * functional state after a suspend and updates the software state as 3112 * necessary. This function is also used for restoring the GPU after a GPU 3113 * reset. 3114 * Returns 0 on success, negative error code on failure. 3115 */ 3116 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) 3117 { 3118 int i, r; 3119 3120 for (i = 0; i < adev->num_ip_blocks; i++) { 3121 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) 3122 continue; 3123 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3124 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3125 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 3126 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) 3127 continue; 3128 r = adev->ip_blocks[i].version->funcs->resume(adev); 3129 if (r) { 3130 DRM_ERROR("resume of IP block <%s> failed %d\n", 3131 adev->ip_blocks[i].version->funcs->name, r); 3132 return r; 3133 } 3134 adev->ip_blocks[i].status.hw = true; 3135 } 3136 3137 return 0; 3138 } 3139 3140 /** 3141 * amdgpu_device_ip_resume - run resume for hardware IPs 3142 * 3143 * @adev: amdgpu_device pointer 3144 * 3145 * Main resume function for hardware IPs. The hardware IPs 3146 * are split into two resume functions because they are 3147 * also used in recovering from a GPU reset and some additional 3148 * steps need to be taken between them. In this case (S3/S4) they are 3149 * run sequentially. 3150 * Returns 0 on success, negative error code on failure.
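* The sequence below is: resume the KFD IOMMU, run resume phase 1 (COMMON,
* GMC and IH), reload firmware via amdgpu_device_fw_loading(), then run
* resume phase 2 for the remaining blocks.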
3151 */ 3152 static int amdgpu_device_ip_resume(struct amdgpu_device *adev) 3153 { 3154 int r; 3155 3156 r = amdgpu_amdkfd_resume_iommu(adev); 3157 if (r) 3158 return r; 3159 3160 r = amdgpu_device_ip_resume_phase1(adev); 3161 if (r) 3162 return r; 3163 3164 r = amdgpu_device_fw_loading(adev); 3165 if (r) 3166 return r; 3167 3168 r = amdgpu_device_ip_resume_phase2(adev); 3169 3170 return r; 3171 } 3172 3173 /** 3174 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV 3175 * 3176 * @adev: amdgpu_device pointer 3177 * 3178 * Query the VBIOS data tables to determine if the board supports SR-IOV. 3179 */ 3180 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) 3181 { 3182 if (amdgpu_sriov_vf(adev)) { 3183 if (adev->is_atom_fw) { 3184 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev)) 3185 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 3186 } else { 3187 if (amdgpu_atombios_has_gpu_virtualization_table(adev)) 3188 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 3189 } 3190 3191 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)) 3192 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0); 3193 } 3194 } 3195 3196 /** 3197 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic 3198 * 3199 * @asic_type: AMD asic type 3200 * 3201 * Check if there is DC (new modesetting infrastructure) support for an asic. 3202 * Returns true if DC has support, false if not. 3203 */ 3204 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) 3205 { 3206 switch (asic_type) { 3207 #if defined(CONFIG_DRM_AMD_DC) 3208 #if defined(CONFIG_DRM_AMD_DC_SI) 3209 case CHIP_TAHITI: 3210 case CHIP_PITCAIRN: 3211 case CHIP_VERDE: 3212 case CHIP_OLAND: 3213 #endif 3214 case CHIP_BONAIRE: 3215 case CHIP_KAVERI: 3216 case CHIP_KABINI: 3217 case CHIP_MULLINS: 3218 /* 3219 * We have systems in the wild with these ASICs that require 3220 * LVDS and VGA support which is not supported with DC. 3221 * 3222 * Fallback to the non-DC driver here by default so as not to 3223 * cause regressions.
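*
* (Users who do not depend on LVDS/VGA can still opt in to DC on these
* ASICs by explicitly setting the amdgpu.dc=1 module parameter, which is
* what the "amdgpu_dc > 0" check below honours.)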
3224 */ 3225 return amdgpu_dc > 0; 3226 case CHIP_HAWAII: 3227 case CHIP_CARRIZO: 3228 case CHIP_STONEY: 3229 case CHIP_POLARIS10: 3230 case CHIP_POLARIS11: 3231 case CHIP_POLARIS12: 3232 case CHIP_VEGAM: 3233 case CHIP_TONGA: 3234 case CHIP_FIJI: 3235 case CHIP_VEGA10: 3236 case CHIP_VEGA12: 3237 case CHIP_VEGA20: 3238 #if defined(CONFIG_DRM_AMD_DC_DCN) 3239 case CHIP_RAVEN: 3240 case CHIP_NAVI10: 3241 case CHIP_NAVI14: 3242 case CHIP_NAVI12: 3243 case CHIP_RENOIR: 3244 case CHIP_SIENNA_CICHLID: 3245 case CHIP_NAVY_FLOUNDER: 3246 case CHIP_DIMGREY_CAVEFISH: 3247 case CHIP_BEIGE_GOBY: 3248 case CHIP_VANGOGH: 3249 case CHIP_YELLOW_CARP: 3250 #endif 3251 return amdgpu_dc != 0; 3252 #endif 3253 default: 3254 if (amdgpu_dc > 0) 3255 DRM_INFO_ONCE("Display Core has been requested via kernel parameter " 3256 "but isn't supported by ASIC, ignoring\n"); 3257 return false; 3258 } 3259 } 3260 3261 /** 3262 * amdgpu_device_has_dc_support - check if dc is supported 3263 * 3264 * @adev: amdgpu_device pointer 3265 * 3266 * Returns true for supported, false for not supported 3267 */ 3268 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) 3269 { 3270 if (amdgpu_sriov_vf(adev) || 3271 adev->enable_virtual_display || 3272 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) 3273 return false; 3274 3275 return amdgpu_device_asic_has_dc_support(adev->asic_type); 3276 } 3277 3278 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) 3279 { 3280 struct amdgpu_device *adev = 3281 container_of(__work, struct amdgpu_device, xgmi_reset_work); 3282 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 3283 3284 /* It's a bug to not have a hive within this function */ 3285 if (WARN_ON(!hive)) 3286 return; 3287 3288 /* 3289 * Use task barrier to synchronize all xgmi reset works across the 3290 * hive. task_barrier_enter and task_barrier_exit will block 3291 * until all the threads running the xgmi reset works reach 3292 * those points. task_barrier_full will do both blocks. 3293 */ 3294 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { 3295 3296 task_barrier_enter(&hive->tb); 3297 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev)); 3298 3299 if (adev->asic_reset_res) 3300 goto fail; 3301 3302 task_barrier_exit(&hive->tb); 3303 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev)); 3304 3305 if (adev->asic_reset_res) 3306 goto fail; 3307 3308 if (adev->mmhub.ras_funcs && 3309 adev->mmhub.ras_funcs->reset_ras_error_count) 3310 adev->mmhub.ras_funcs->reset_ras_error_count(adev); 3311 } else { 3312 3313 task_barrier_full(&hive->tb); 3314 adev->asic_reset_res = amdgpu_asic_reset(adev); 3315 } 3316 3317 fail: 3318 if (adev->asic_reset_res) 3319 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", 3320 adev->asic_reset_res, adev_to_drm(adev)->unique); 3321 amdgpu_put_xgmi_hive(hive); 3322 } 3323 3324 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) 3325 { 3326 char *input = amdgpu_lockup_timeout; 3327 char *timeout_setting = NULL; 3328 int index = 0; 3329 long timeout; 3330 int ret = 0; 3331 3332 /* 3333 * By default timeout for non compute jobs is 10000 3334 * and 60000 for compute jobs. 3335 * In SR-IOV or passthrough mode, timeout for compute 3336 * jobs are 60000 by default. 3337 */ 3338 adev->gfx_timeout = msecs_to_jiffies(10000); 3339 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; 3340 if (amdgpu_sriov_vf(adev)) 3341 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ? 
3342 msecs_to_jiffies(60000) : msecs_to_jiffies(10000); 3343 else 3344 adev->compute_timeout = msecs_to_jiffies(60000); 3345 3346 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { 3347 while ((timeout_setting = strsep(&input, ",")) && 3348 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { 3349 ret = kstrtol(timeout_setting, 0, &timeout); 3350 if (ret) 3351 return ret; 3352 3353 if (timeout == 0) { 3354 index++; 3355 continue; 3356 } else if (timeout < 0) { 3357 timeout = MAX_SCHEDULE_TIMEOUT; 3358 } else { 3359 timeout = msecs_to_jiffies(timeout); 3360 } 3361 3362 switch (index++) { 3363 case 0: 3364 adev->gfx_timeout = timeout; 3365 break; 3366 case 1: 3367 adev->compute_timeout = timeout; 3368 break; 3369 case 2: 3370 adev->sdma_timeout = timeout; 3371 break; 3372 case 3: 3373 adev->video_timeout = timeout; 3374 break; 3375 default: 3376 break; 3377 } 3378 } 3379 /* 3380 * There is only one value specified and 3381 * it should apply to all non-compute jobs. 3382 */ 3383 if (index == 1) { 3384 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; 3385 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) 3386 adev->compute_timeout = adev->gfx_timeout; 3387 } 3388 } 3389 3390 return ret; 3391 } 3392 3393 static const struct attribute *amdgpu_dev_attributes[] = { 3394 &dev_attr_product_name.attr, 3395 &dev_attr_product_number.attr, 3396 &dev_attr_serial_number.attr, 3397 &dev_attr_pcie_replay_count.attr, 3398 NULL 3399 }; 3400 3401 /** 3402 * amdgpu_device_init - initialize the driver 3403 * 3404 * @adev: amdgpu_device pointer 3405 * @flags: driver flags 3406 * 3407 * Initializes the driver info and hw (all asics). 3408 * Returns 0 for success or an error on failure. 3409 * Called at driver startup. 3410 */ 3411 int amdgpu_device_init(struct amdgpu_device *adev, 3412 uint32_t flags) 3413 { 3414 struct drm_device *ddev = adev_to_drm(adev); 3415 struct pci_dev *pdev = adev->pdev; 3416 int r, i; 3417 bool px = false; 3418 u32 max_MBps; 3419 3420 adev->shutdown = false; 3421 adev->flags = flags; 3422 3423 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) 3424 adev->asic_type = amdgpu_force_asic_type; 3425 else 3426 adev->asic_type = flags & AMD_ASIC_MASK; 3427 3428 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; 3429 if (amdgpu_emu_mode == 1) 3430 adev->usec_timeout *= 10; 3431 adev->gmc.gart_size = 512 * 1024 * 1024; 3432 adev->accel_working = false; 3433 adev->num_rings = 0; 3434 adev->mman.buffer_funcs = NULL; 3435 adev->mman.buffer_funcs_ring = NULL; 3436 adev->vm_manager.vm_pte_funcs = NULL; 3437 adev->vm_manager.vm_pte_num_scheds = 0; 3438 adev->gmc.gmc_funcs = NULL; 3439 adev->harvest_ip_mask = 0x0; 3440 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 3441 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 3442 3443 adev->smc_rreg = &amdgpu_invalid_rreg; 3444 adev->smc_wreg = &amdgpu_invalid_wreg; 3445 adev->pcie_rreg = &amdgpu_invalid_rreg; 3446 adev->pcie_wreg = &amdgpu_invalid_wreg; 3447 adev->pciep_rreg = &amdgpu_invalid_rreg; 3448 adev->pciep_wreg = &amdgpu_invalid_wreg; 3449 adev->pcie_rreg64 = &amdgpu_invalid_rreg64; 3450 adev->pcie_wreg64 = &amdgpu_invalid_wreg64; 3451 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; 3452 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; 3453 adev->didt_rreg = &amdgpu_invalid_rreg; 3454 adev->didt_wreg = &amdgpu_invalid_wreg; 3455 adev->gc_cac_rreg = &amdgpu_invalid_rreg; 3456 adev->gc_cac_wreg = &amdgpu_invalid_wreg; 3457 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; 3458 
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; 3459 3460 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 3461 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 3462 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 3463 3464 /* mutex initializations are all done here so we 3465 * can recall functions without locking issues */ 3466 mutex_init(&adev->firmware.mutex); 3467 mutex_init(&adev->pm.mutex); 3468 mutex_init(&adev->gfx.gpu_clock_mutex); 3469 mutex_init(&adev->srbm_mutex); 3470 mutex_init(&adev->gfx.pipe_reserve_mutex); 3471 mutex_init(&adev->gfx.gfx_off_mutex); 3472 mutex_init(&adev->grbm_idx_mutex); 3473 mutex_init(&adev->mn_lock); 3474 mutex_init(&adev->virt.vf_errors.lock); 3475 hash_init(adev->mn_hash); 3476 atomic_set(&adev->in_gpu_reset, 0); 3477 init_rwsem(&adev->reset_sem); 3478 mutex_init(&adev->psp.mutex); 3479 mutex_init(&adev->notifier_lock); 3480 3481 r = amdgpu_device_init_apu_flags(adev); 3482 if (r) 3483 return r; 3484 3485 r = amdgpu_device_check_arguments(adev); 3486 if (r) 3487 return r; 3488 3489 spin_lock_init(&adev->mmio_idx_lock); 3490 spin_lock_init(&adev->smc_idx_lock); 3491 spin_lock_init(&adev->pcie_idx_lock); 3492 spin_lock_init(&adev->uvd_ctx_idx_lock); 3493 spin_lock_init(&adev->didt_idx_lock); 3494 spin_lock_init(&adev->gc_cac_idx_lock); 3495 spin_lock_init(&adev->se_cac_idx_lock); 3496 spin_lock_init(&adev->audio_endpt_idx_lock); 3497 spin_lock_init(&adev->mm_stats.lock); 3498 3499 INIT_LIST_HEAD(&adev->shadow_list); 3500 mutex_init(&adev->shadow_list_lock); 3501 3502 INIT_LIST_HEAD(&adev->reset_list); 3503 3504 INIT_DELAYED_WORK(&adev->delayed_init_work, 3505 amdgpu_device_delayed_init_work_handler); 3506 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, 3507 amdgpu_device_delay_enable_gfx_off); 3508 3509 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); 3510 3511 adev->gfx.gfx_off_req_count = 1; 3512 adev->pm.ac_power = power_supply_is_system_supplied() > 0; 3513 3514 atomic_set(&adev->throttling_logging_enabled, 1); 3515 /* 3516 * If throttling continues, logging will be performed every minute 3517 * to avoid log flooding. "-1" is subtracted since the thermal 3518 * throttling interrupt comes every second. Thus, the total logging 3519 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting 3520 * for throttling interrupt) = 60 seconds.
3521 */ 3522 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1); 3523 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE); 3524 3525 /* Registers mapping */ 3526 /* TODO: block userspace mapping of io register */ 3527 if (adev->asic_type >= CHIP_BONAIRE) { 3528 adev->rmmio_base = pci_resource_start(adev->pdev, 5); 3529 adev->rmmio_size = pci_resource_len(adev->pdev, 5); 3530 } else { 3531 adev->rmmio_base = pci_resource_start(adev->pdev, 2); 3532 adev->rmmio_size = pci_resource_len(adev->pdev, 2); 3533 } 3534 3535 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); 3536 if (adev->rmmio == NULL) { 3537 return -ENOMEM; 3538 } 3539 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); 3540 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); 3541 3542 amdgpu_device_get_pcie_info(adev); 3543 3544 if (amdgpu_mcbp) 3545 DRM_INFO("MCBP is enabled\n"); 3546 3547 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10) 3548 adev->enable_mes = true; 3549 3550 /* detect hw virtualization here */ 3551 amdgpu_detect_virtualization(adev); 3552 3553 r = amdgpu_device_get_job_timeout_settings(adev); 3554 if (r) { 3555 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); 3556 return r; 3557 } 3558 3559 /* early init functions */ 3560 r = amdgpu_device_ip_early_init(adev); 3561 if (r) 3562 return r; 3563 3564 /* enable PCIE atomic ops */ 3565 if (amdgpu_sriov_vf(adev)) 3566 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) 3567 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags == 3568 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64); 3569 else 3570 adev->have_atomics_support = 3571 !pci_enable_atomic_ops_to_root(adev->pdev, 3572 PCI_EXP_DEVCAP2_ATOMIC_COMP32 | 3573 PCI_EXP_DEVCAP2_ATOMIC_COMP64); 3574 if (!adev->have_atomics_support) 3575 dev_info(adev->dev, "PCIE atomic ops is not supported\n"); 3576 3577 /* doorbell bar mapping and doorbell index init*/ 3578 amdgpu_device_doorbell_init(adev); 3579 3580 if (amdgpu_emu_mode == 1) { 3581 /* post the asic on emulation mode */ 3582 emu_soc_asic_init(adev); 3583 goto fence_driver_init; 3584 } 3585 3586 amdgpu_reset_init(adev); 3587 3588 /* detect if we are with an SRIOV vbios */ 3589 amdgpu_device_detect_sriov_bios(adev); 3590 3591 /* check if we need to reset the asic 3592 * E.g., driver was not cleanly unloaded previously, etc. 
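* (for instance after a kexec, or when a previous driver instance crashed,
* the ASIC may still be running with stale state)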
3593 */ 3594 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { 3595 if (adev->gmc.xgmi.num_physical_nodes) { 3596 dev_info(adev->dev, "Pending hive reset.\n"); 3597 adev->gmc.xgmi.pending_reset = true; 3598 /* Only need to init necessary block for SMU to handle the reset */ 3599 for (i = 0; i < adev->num_ip_blocks; i++) { 3600 if (!adev->ip_blocks[i].status.valid) 3601 continue; 3602 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3603 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3604 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 3605 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) { 3606 DRM_DEBUG("IP %s disabled for hw_init.\n", 3607 adev->ip_blocks[i].version->funcs->name); 3608 adev->ip_blocks[i].status.hw = true; 3609 } 3610 } 3611 } else { 3612 r = amdgpu_asic_reset(adev); 3613 if (r) { 3614 dev_err(adev->dev, "asic reset on init failed\n"); 3615 goto failed; 3616 } 3617 } 3618 } 3619 3620 pci_enable_pcie_error_reporting(adev->pdev); 3621 3622 /* Post card if necessary */ 3623 if (amdgpu_device_need_post(adev)) { 3624 if (!adev->bios) { 3625 dev_err(adev->dev, "no vBIOS found\n"); 3626 r = -EINVAL; 3627 goto failed; 3628 } 3629 DRM_INFO("GPU posting now...\n"); 3630 r = amdgpu_device_asic_init(adev); 3631 if (r) { 3632 dev_err(adev->dev, "gpu post error!\n"); 3633 goto failed; 3634 } 3635 } 3636 3637 if (adev->is_atom_fw) { 3638 /* Initialize clocks */ 3639 r = amdgpu_atomfirmware_get_clock_info(adev); 3640 if (r) { 3641 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); 3642 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 3643 goto failed; 3644 } 3645 } else { 3646 /* Initialize clocks */ 3647 r = amdgpu_atombios_get_clock_info(adev); 3648 if (r) { 3649 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); 3650 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 3651 goto failed; 3652 } 3653 /* init i2c buses */ 3654 if (!amdgpu_device_has_dc_support(adev)) 3655 amdgpu_atombios_i2c_init(adev); 3656 } 3657 3658 fence_driver_init: 3659 /* Fence driver */ 3660 r = amdgpu_fence_driver_sw_init(adev); 3661 if (r) { 3662 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n"); 3663 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); 3664 goto failed; 3665 } 3666 3667 /* init the mode config */ 3668 drm_mode_config_init(adev_to_drm(adev)); 3669 3670 r = amdgpu_device_ip_init(adev); 3671 if (r) { 3672 /* failed in exclusive mode due to timeout */ 3673 if (amdgpu_sriov_vf(adev) && 3674 !amdgpu_sriov_runtime(adev) && 3675 amdgpu_virt_mmio_blocked(adev) && 3676 !amdgpu_virt_wait_reset(adev)) { 3677 dev_err(adev->dev, "VF exclusive mode timeout\n"); 3678 /* Don't send request since VF is inactive. */ 3679 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; 3680 adev->virt.ops = NULL; 3681 r = -EAGAIN; 3682 goto release_ras_con; 3683 } 3684 dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); 3685 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); 3686 goto release_ras_con; 3687 } 3688 3689 amdgpu_fence_driver_hw_init(adev); 3690 3691 dev_info(adev->dev, 3692 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n", 3693 adev->gfx.config.max_shader_engines, 3694 adev->gfx.config.max_sh_per_se, 3695 adev->gfx.config.max_cu_per_sh, 3696 adev->gfx.cu_info.number); 3697 3698 adev->accel_working = true; 3699 3700 amdgpu_vm_check_compute_bug(adev); 3701 3702 /* Initialize the buffer migration limit. 
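* If amdgpu.moverate is not set, migration defaults to a conservative
* 8 MB/s cap; the value is stored as a log2 so the accounting code can
* shift instead of divide.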
*/ 3703 if (amdgpu_moverate >= 0) 3704 max_MBps = amdgpu_moverate; 3705 else 3706 max_MBps = 8; /* Allow 8 MB/s. */ 3707 /* Get a log2 for easy divisions. */ 3708 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); 3709 3710 amdgpu_fbdev_init(adev); 3711 3712 r = amdgpu_pm_sysfs_init(adev); 3713 if (r) { 3714 adev->pm_sysfs_en = false; 3715 DRM_ERROR("registering pm debugfs failed (%d).\n", r); 3716 } else 3717 adev->pm_sysfs_en = true; 3718 3719 r = amdgpu_ucode_sysfs_init(adev); 3720 if (r) { 3721 adev->ucode_sysfs_en = false; 3722 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); 3723 } else 3724 adev->ucode_sysfs_en = true; 3725 3726 if ((amdgpu_testing & 1)) { 3727 if (adev->accel_working) 3728 amdgpu_test_moves(adev); 3729 else 3730 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n"); 3731 } 3732 if (amdgpu_benchmarking) { 3733 if (adev->accel_working) 3734 amdgpu_benchmark(adev, amdgpu_benchmarking); 3735 else 3736 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n"); 3737 } 3738 3739 /* 3740 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost. 3741 * Otherwise the mgpu fan boost feature will be skipped because the 3742 * gpu instance count would be too low. 3743 */ 3744 amdgpu_register_gpu_instance(adev); 3745 3746 /* enable clockgating, etc. after ib tests, etc. since some blocks require 3747 * explicit gating rather than handling it automatically. 3748 */ 3749 if (!adev->gmc.xgmi.pending_reset) { 3750 r = amdgpu_device_ip_late_init(adev); 3751 if (r) { 3752 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); 3753 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); 3754 goto release_ras_con; 3755 } 3756 /* must succeed. */ 3757 amdgpu_ras_resume(adev); 3758 queue_delayed_work(system_wq, &adev->delayed_init_work, 3759 msecs_to_jiffies(AMDGPU_RESUME_MS)); 3760 } 3761 3762 if (amdgpu_sriov_vf(adev)) 3763 flush_delayed_work(&adev->delayed_init_work); 3764 3765 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes); 3766 if (r) 3767 dev_err(adev->dev, "Could not create amdgpu device attr\n"); 3768 3769 if (IS_ENABLED(CONFIG_PERF_EVENTS)) { 3770 r = amdgpu_pmu_init(adev); 3771 if (r) 3772 dev_err(adev->dev, "amdgpu_pmu_init failed\n"); 3773 } /* brace the error check so a stale r is not reported when PMU support is compiled out */ 3774 /* Have stored pci confspace at hand for restore in sudden PCI error */ 3775 if (amdgpu_device_cache_pci_state(adev->pdev)) 3776 pci_restore_state(pdev); 3777 3778 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ 3779 /* this will fail for cards that aren't VGA class devices, just 3780 * ignore it */ 3781 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) 3782 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); 3783 3784 if (amdgpu_device_supports_px(ddev)) { 3785 px = true; 3786 vga_switcheroo_register_client(adev->pdev, 3787 &amdgpu_switcheroo_ops, px); 3788 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); 3789 } 3790 3791 if (adev->gmc.xgmi.pending_reset) 3792 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work, 3793 msecs_to_jiffies(AMDGPU_RESUME_MS)); 3794 3795 return 0; 3796 3797 release_ras_con: 3798 amdgpu_release_ras_context(adev); 3799 3800 failed: 3801 amdgpu_vf_error_trans_all(adev); 3802 3803 return r; 3804 } 3805 3806 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) 3807 { 3808 /* Clear all CPU mappings pointing to this device */ 3809 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); 3810 3811 /* Unmap all mapped bars - Doorbell, registers and VRAM */ 3812
amdgpu_device_doorbell_fini(adev);
3813
3814 iounmap(adev->rmmio);
3815 adev->rmmio = NULL;
3816 if (adev->mman.aper_base_kaddr)
3817 iounmap(adev->mman.aper_base_kaddr);
3818 adev->mman.aper_base_kaddr = NULL;
3819
3820 /* Memory manager related */
3821 if (!adev->gmc.xgmi.connected_to_cpu) {
3822 arch_phys_wc_del(adev->gmc.vram_mtrr);
3823 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3824 }
3825 }
3826
3827 /**
3828 * amdgpu_device_fini_hw - tear down the driver's hardware state
3829 *
3830 * @adev: amdgpu_device pointer
3831 *
3832 * Tear down the hardware half of the driver info (all asics).
3833 * Called at driver shutdown.
3834 */
3835 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3836 {
3837 dev_info(adev->dev, "amdgpu: finishing device.\n");
3838 flush_delayed_work(&adev->delayed_init_work);
3839 if (adev->mman.initialized) {
3840 flush_delayed_work(&adev->mman.bdev.wq);
3841 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3842 }
3843 adev->shutdown = true;
3844
3845 /* make sure the IB tests are finished before entering exclusive mode
3846 * to avoid preempting a running IB test
3847 */
3848 if (amdgpu_sriov_vf(adev)) {
3849 amdgpu_virt_request_full_gpu(adev, false);
3850 amdgpu_virt_fini_data_exchange(adev);
3851 }
3852
3853 /* disable all interrupts */
3854 amdgpu_irq_disable_all(adev);
3855 if (adev->mode_info.mode_config_initialized) {
3856 if (!amdgpu_device_has_dc_support(adev))
3857 drm_helper_force_disable_all(adev_to_drm(adev));
3858 else
3859 drm_atomic_helper_shutdown(adev_to_drm(adev));
3860 }
3861 amdgpu_fence_driver_hw_fini(adev);
3862
3863 if (adev->pm_sysfs_en)
3864 amdgpu_pm_sysfs_fini(adev);
3865 if (adev->ucode_sysfs_en)
3866 amdgpu_ucode_sysfs_fini(adev);
3867 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3868
3869 amdgpu_fbdev_fini(adev);
3870
3871 amdgpu_irq_fini_hw(adev);
3872
3873 amdgpu_device_ip_fini_early(adev);
3874
3875 amdgpu_gart_dummy_page_fini(adev);
3876
3877 amdgpu_device_unmap_mmio(adev);
3878 }
3879
/**
 * amdgpu_device_fini_sw - tear down the driver's software state
 *
 * @adev: amdgpu_device pointer
 *
 * Software teardown counterpart of amdgpu_device_fini_hw(); frees the IP
 * blocks, the fence driver, the BIOS image and the remaining software state.
 */
3880 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3881 {
3882 amdgpu_device_ip_fini(adev);
3883 amdgpu_fence_driver_sw_fini(adev);
3884 release_firmware(adev->firmware.gpu_info_fw);
3885 adev->firmware.gpu_info_fw = NULL;
3886 adev->accel_working = false;
3887
3888 amdgpu_reset_fini(adev);
3889
3890 /* free i2c buses */
3891 if (!amdgpu_device_has_dc_support(adev))
3892 amdgpu_i2c_fini(adev);
3893
3894 if (amdgpu_emu_mode != 1)
3895 amdgpu_atombios_fini(adev);
3896
3897 kfree(adev->bios);
3898 adev->bios = NULL;
3899 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3900 vga_switcheroo_unregister_client(adev->pdev);
3901 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3902 }
3903 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3904 vga_client_unregister(adev->pdev);
3905
3906 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3907 amdgpu_pmu_fini(adev);
3908 if (adev->mman.discovery_bin)
3909 amdgpu_discovery_fini(adev);
3910
3911 kfree(adev->pci_state);
3912
3913 }
3914
3915
3916 /*
3917 * Suspend & resume.
3918 */
3919 /**
3920 * amdgpu_device_suspend - initiate device suspend
3921 *
3922 * @dev: drm dev pointer
3923 * @fbcon : notify the fbdev of suspend
3924 *
3925 * Puts the hw in the suspend state (all asics).
3926 * Returns 0 for success or an error on failure.
3927 * Called at driver suspend.
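 *
 * Suspend runs in two IP phases: phase1 runs before VRAM is evicted,
 * phase2 after the fence driver HW has been disabled. VRAM is evicted
 * twice; the second pass evicts the GART page table using the CPU once
 * the rings are quiet.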
3928 */ 3929 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) 3930 { 3931 struct amdgpu_device *adev = drm_to_adev(dev); 3932 3933 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 3934 return 0; 3935 3936 adev->in_suspend = true; 3937 3938 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) 3939 DRM_WARN("smart shift update failed\n"); 3940 3941 drm_kms_helper_poll_disable(dev); 3942 3943 if (fbcon) 3944 amdgpu_fbdev_set_suspend(adev, 1); 3945 3946 cancel_delayed_work_sync(&adev->delayed_init_work); 3947 3948 amdgpu_ras_suspend(adev); 3949 3950 amdgpu_device_ip_suspend_phase1(adev); 3951 3952 if (!adev->in_s0ix) 3953 amdgpu_amdkfd_suspend(adev, adev->in_runpm); 3954 3955 /* evict vram memory */ 3956 amdgpu_bo_evict_vram(adev); 3957 3958 amdgpu_fence_driver_hw_fini(adev); 3959 3960 amdgpu_device_ip_suspend_phase2(adev); 3961 /* evict remaining vram memory 3962 * This second call to evict vram is to evict the gart page table 3963 * using the CPU. 3964 */ 3965 amdgpu_bo_evict_vram(adev); 3966 3967 return 0; 3968 } 3969 3970 /** 3971 * amdgpu_device_resume - initiate device resume 3972 * 3973 * @dev: drm dev pointer 3974 * @fbcon : notify the fbdev of resume 3975 * 3976 * Bring the hw back to operating state (all asics). 3977 * Returns 0 for success or an error on failure. 3978 * Called at driver resume. 3979 */ 3980 int amdgpu_device_resume(struct drm_device *dev, bool fbcon) 3981 { 3982 struct amdgpu_device *adev = drm_to_adev(dev); 3983 int r = 0; 3984 3985 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 3986 return 0; 3987 3988 if (adev->in_s0ix) 3989 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry); 3990 3991 /* post card */ 3992 if (amdgpu_device_need_post(adev)) { 3993 r = amdgpu_device_asic_init(adev); 3994 if (r) 3995 dev_err(adev->dev, "amdgpu asic init failed\n"); 3996 } 3997 3998 r = amdgpu_device_ip_resume(adev); 3999 if (r) { 4000 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); 4001 return r; 4002 } 4003 amdgpu_fence_driver_hw_init(adev); 4004 4005 r = amdgpu_device_ip_late_init(adev); 4006 if (r) 4007 return r; 4008 4009 queue_delayed_work(system_wq, &adev->delayed_init_work, 4010 msecs_to_jiffies(AMDGPU_RESUME_MS)); 4011 4012 if (!adev->in_s0ix) { 4013 r = amdgpu_amdkfd_resume(adev, adev->in_runpm); 4014 if (r) 4015 return r; 4016 } 4017 4018 /* Make sure IB tests flushed */ 4019 flush_delayed_work(&adev->delayed_init_work); 4020 4021 if (fbcon) 4022 amdgpu_fbdev_set_suspend(adev, 0); 4023 4024 drm_kms_helper_poll_enable(dev); 4025 4026 amdgpu_ras_resume(adev); 4027 4028 /* 4029 * Most of the connector probing functions try to acquire runtime pm 4030 * refs to ensure that the GPU is powered on when connector polling is 4031 * performed. Since we're calling this from a runtime PM callback, 4032 * trying to acquire rpm refs will cause us to deadlock. 4033 * 4034 * Since we're guaranteed to be holding the rpm lock, it's safe to 4035 * temporarily disable the rpm helpers so this doesn't deadlock us. 
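 *
 * Bumping power.disable_depth by hand is, in effect, an open-coded,
 * lock-free pm_runtime_disable()/pm_runtime_enable() pair scoped to
 * the hotplug event below.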
4036 */ 4037 #ifdef CONFIG_PM 4038 dev->dev->power.disable_depth++; 4039 #endif 4040 if (!amdgpu_device_has_dc_support(adev)) 4041 drm_helper_hpd_irq_event(dev); 4042 else 4043 drm_kms_helper_hotplug_event(dev); 4044 #ifdef CONFIG_PM 4045 dev->dev->power.disable_depth--; 4046 #endif 4047 adev->in_suspend = false; 4048 4049 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0)) 4050 DRM_WARN("smart shift update failed\n"); 4051 4052 return 0; 4053 } 4054 4055 /** 4056 * amdgpu_device_ip_check_soft_reset - did soft reset succeed 4057 * 4058 * @adev: amdgpu_device pointer 4059 * 4060 * The list of all the hardware IPs that make up the asic is walked and 4061 * the check_soft_reset callbacks are run. check_soft_reset determines 4062 * if the asic is still hung or not. 4063 * Returns true if any of the IPs are still in a hung state, false if not. 4064 */ 4065 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) 4066 { 4067 int i; 4068 bool asic_hang = false; 4069 4070 if (amdgpu_sriov_vf(adev)) 4071 return true; 4072 4073 if (amdgpu_asic_need_full_reset(adev)) 4074 return true; 4075 4076 for (i = 0; i < adev->num_ip_blocks; i++) { 4077 if (!adev->ip_blocks[i].status.valid) 4078 continue; 4079 if (adev->ip_blocks[i].version->funcs->check_soft_reset) 4080 adev->ip_blocks[i].status.hang = 4081 adev->ip_blocks[i].version->funcs->check_soft_reset(adev); 4082 if (adev->ip_blocks[i].status.hang) { 4083 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); 4084 asic_hang = true; 4085 } 4086 } 4087 return asic_hang; 4088 } 4089 4090 /** 4091 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset 4092 * 4093 * @adev: amdgpu_device pointer 4094 * 4095 * The list of all the hardware IPs that make up the asic is walked and the 4096 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset 4097 * handles any IP specific hardware or software state changes that are 4098 * necessary for a soft reset to succeed. 4099 * Returns 0 on success, negative error code on failure. 4100 */ 4101 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) 4102 { 4103 int i, r = 0; 4104 4105 for (i = 0; i < adev->num_ip_blocks; i++) { 4106 if (!adev->ip_blocks[i].status.valid) 4107 continue; 4108 if (adev->ip_blocks[i].status.hang && 4109 adev->ip_blocks[i].version->funcs->pre_soft_reset) { 4110 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); 4111 if (r) 4112 return r; 4113 } 4114 } 4115 4116 return 0; 4117 } 4118 4119 /** 4120 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed 4121 * 4122 * @adev: amdgpu_device pointer 4123 * 4124 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu 4125 * reset is necessary to recover. 4126 * Returns true if a full asic reset is required, false if not. 
4127 */ 4128 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) 4129 { 4130 int i; 4131 4132 if (amdgpu_asic_need_full_reset(adev)) 4133 return true; 4134 4135 for (i = 0; i < adev->num_ip_blocks; i++) { 4136 if (!adev->ip_blocks[i].status.valid) 4137 continue; 4138 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || 4139 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || 4140 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || 4141 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || 4142 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 4143 if (adev->ip_blocks[i].status.hang) { 4144 dev_info(adev->dev, "Some block need full reset!\n"); 4145 return true; 4146 } 4147 } 4148 } 4149 return false; 4150 } 4151 4152 /** 4153 * amdgpu_device_ip_soft_reset - do a soft reset 4154 * 4155 * @adev: amdgpu_device pointer 4156 * 4157 * The list of all the hardware IPs that make up the asic is walked and the 4158 * soft_reset callbacks are run if the block is hung. soft_reset handles any 4159 * IP specific hardware or software state changes that are necessary to soft 4160 * reset the IP. 4161 * Returns 0 on success, negative error code on failure. 4162 */ 4163 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) 4164 { 4165 int i, r = 0; 4166 4167 for (i = 0; i < adev->num_ip_blocks; i++) { 4168 if (!adev->ip_blocks[i].status.valid) 4169 continue; 4170 if (adev->ip_blocks[i].status.hang && 4171 adev->ip_blocks[i].version->funcs->soft_reset) { 4172 r = adev->ip_blocks[i].version->funcs->soft_reset(adev); 4173 if (r) 4174 return r; 4175 } 4176 } 4177 4178 return 0; 4179 } 4180 4181 /** 4182 * amdgpu_device_ip_post_soft_reset - clean up from soft reset 4183 * 4184 * @adev: amdgpu_device pointer 4185 * 4186 * The list of all the hardware IPs that make up the asic is walked and the 4187 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset 4188 * handles any IP specific hardware or software state changes that are 4189 * necessary after the IP has been soft reset. 4190 * Returns 0 on success, negative error code on failure. 4191 */ 4192 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) 4193 { 4194 int i, r = 0; 4195 4196 for (i = 0; i < adev->num_ip_blocks; i++) { 4197 if (!adev->ip_blocks[i].status.valid) 4198 continue; 4199 if (adev->ip_blocks[i].status.hang && 4200 adev->ip_blocks[i].version->funcs->post_soft_reset) 4201 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); 4202 if (r) 4203 return r; 4204 } 4205 4206 return 0; 4207 } 4208 4209 /** 4210 * amdgpu_device_recover_vram - Recover some VRAM contents 4211 * 4212 * @adev: amdgpu_device pointer 4213 * 4214 * Restores the contents of VRAM buffers from the shadows in GTT. Used to 4215 * restore things like GPUVM page tables after a GPU reset where 4216 * the contents of VRAM might be lost. 4217 * 4218 * Returns: 4219 * 0 on success, negative error code on failure. 
4220 */ 4221 static int amdgpu_device_recover_vram(struct amdgpu_device *adev) 4222 { 4223 struct dma_fence *fence = NULL, *next = NULL; 4224 struct amdgpu_bo *shadow; 4225 struct amdgpu_bo_vm *vmbo; 4226 long r = 1, tmo; 4227 4228 if (amdgpu_sriov_runtime(adev)) 4229 tmo = msecs_to_jiffies(8000); 4230 else 4231 tmo = msecs_to_jiffies(100); 4232 4233 dev_info(adev->dev, "recover vram bo from shadow start\n"); 4234 mutex_lock(&adev->shadow_list_lock); 4235 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { 4236 shadow = &vmbo->bo; 4237 /* No need to recover an evicted BO */ 4238 if (shadow->tbo.resource->mem_type != TTM_PL_TT || 4239 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || 4240 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM) 4241 continue; 4242 4243 r = amdgpu_bo_restore_shadow(shadow, &next); 4244 if (r) 4245 break; 4246 4247 if (fence) { 4248 tmo = dma_fence_wait_timeout(fence, false, tmo); 4249 dma_fence_put(fence); 4250 fence = next; 4251 if (tmo == 0) { 4252 r = -ETIMEDOUT; 4253 break; 4254 } else if (tmo < 0) { 4255 r = tmo; 4256 break; 4257 } 4258 } else { 4259 fence = next; 4260 } 4261 } 4262 mutex_unlock(&adev->shadow_list_lock); 4263 4264 if (fence) 4265 tmo = dma_fence_wait_timeout(fence, false, tmo); 4266 dma_fence_put(fence); 4267 4268 if (r < 0 || tmo <= 0) { 4269 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); 4270 return -EIO; 4271 } 4272 4273 dev_info(adev->dev, "recover vram bo from shadow done\n"); 4274 return 0; 4275 } 4276 4277 4278 /** 4279 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf 4280 * 4281 * @adev: amdgpu_device pointer 4282 * @from_hypervisor: request from hypervisor 4283 * 4284 * do VF FLR and reinitialize Asic 4285 * return 0 means succeeded otherwise failed 4286 */ 4287 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, 4288 bool from_hypervisor) 4289 { 4290 int r; 4291 4292 if (from_hypervisor) 4293 r = amdgpu_virt_request_full_gpu(adev, true); 4294 else 4295 r = amdgpu_virt_reset_gpu(adev); 4296 if (r) 4297 return r; 4298 4299 amdgpu_amdkfd_pre_reset(adev); 4300 4301 /* Resume IP prior to SMC */ 4302 r = amdgpu_device_ip_reinit_early_sriov(adev); 4303 if (r) 4304 goto error; 4305 4306 amdgpu_virt_init_data_exchange(adev); 4307 /* we need recover gart prior to run SMC/CP/SDMA resume */ 4308 amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)); 4309 4310 r = amdgpu_device_fw_loading(adev); 4311 if (r) 4312 return r; 4313 4314 /* now we are okay to resume SMC/CP/SDMA */ 4315 r = amdgpu_device_ip_reinit_late_sriov(adev); 4316 if (r) 4317 goto error; 4318 4319 amdgpu_irq_gpu_reset_resume_helper(adev); 4320 r = amdgpu_ib_ring_tests(adev); 4321 amdgpu_amdkfd_post_reset(adev); 4322 4323 error: 4324 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 4325 amdgpu_inc_vram_lost(adev); 4326 r = amdgpu_device_recover_vram(adev); 4327 } 4328 amdgpu_virt_release_full_gpu(adev, true); 4329 4330 return r; 4331 } 4332 4333 /** 4334 * amdgpu_device_has_job_running - check if there is any job in mirror list 4335 * 4336 * @adev: amdgpu_device pointer 4337 * 4338 * check if there is any job in mirror list 4339 */ 4340 bool amdgpu_device_has_job_running(struct amdgpu_device *adev) 4341 { 4342 int i; 4343 struct drm_sched_job *job; 4344 4345 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 4346 struct amdgpu_ring *ring = adev->rings[i]; 4347 4348 if (!ring || !ring->sched.thread) 4349 continue; 4350 4351 spin_lock(&ring->sched.job_list_lock); 4352 job = 
list_first_entry_or_null(&ring->sched.pending_list, 4353 struct drm_sched_job, list); 4354 spin_unlock(&ring->sched.job_list_lock); 4355 if (job) 4356 return true; 4357 } 4358 return false; 4359 } 4360 4361 /** 4362 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery 4363 * 4364 * @adev: amdgpu_device pointer 4365 * 4366 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover 4367 * a hung GPU. 4368 */ 4369 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) 4370 { 4371 if (!amdgpu_device_ip_check_soft_reset(adev)) { 4372 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n"); 4373 return false; 4374 } 4375 4376 if (amdgpu_gpu_recovery == 0) 4377 goto disabled; 4378 4379 if (amdgpu_sriov_vf(adev)) 4380 return true; 4381 4382 if (amdgpu_gpu_recovery == -1) { 4383 switch (adev->asic_type) { 4384 case CHIP_BONAIRE: 4385 case CHIP_HAWAII: 4386 case CHIP_TOPAZ: 4387 case CHIP_TONGA: 4388 case CHIP_FIJI: 4389 case CHIP_POLARIS10: 4390 case CHIP_POLARIS11: 4391 case CHIP_POLARIS12: 4392 case CHIP_VEGAM: 4393 case CHIP_VEGA20: 4394 case CHIP_VEGA10: 4395 case CHIP_VEGA12: 4396 case CHIP_RAVEN: 4397 case CHIP_ARCTURUS: 4398 case CHIP_RENOIR: 4399 case CHIP_NAVI10: 4400 case CHIP_NAVI14: 4401 case CHIP_NAVI12: 4402 case CHIP_SIENNA_CICHLID: 4403 case CHIP_NAVY_FLOUNDER: 4404 case CHIP_DIMGREY_CAVEFISH: 4405 case CHIP_BEIGE_GOBY: 4406 case CHIP_VANGOGH: 4407 case CHIP_ALDEBARAN: 4408 break; 4409 default: 4410 goto disabled; 4411 } 4412 } 4413 4414 return true; 4415 4416 disabled: 4417 dev_info(adev->dev, "GPU recovery disabled.\n"); 4418 return false; 4419 } 4420 4421 int amdgpu_device_mode1_reset(struct amdgpu_device *adev) 4422 { 4423 u32 i; 4424 int ret = 0; 4425 4426 amdgpu_atombios_scratch_regs_engine_hung(adev, true); 4427 4428 dev_info(adev->dev, "GPU mode1 reset\n"); 4429 4430 /* disable BM */ 4431 pci_clear_master(adev->pdev); 4432 4433 amdgpu_device_cache_pci_state(adev->pdev); 4434 4435 if (amdgpu_dpm_is_mode1_reset_supported(adev)) { 4436 dev_info(adev->dev, "GPU smu mode1 reset\n"); 4437 ret = amdgpu_dpm_mode1_reset(adev); 4438 } else { 4439 dev_info(adev->dev, "GPU psp mode1 reset\n"); 4440 ret = psp_gpu_reset(adev); 4441 } 4442 4443 if (ret) 4444 dev_err(adev->dev, "GPU mode1 reset failed\n"); 4445 4446 amdgpu_device_load_pci_state(adev->pdev); 4447 4448 /* wait for asic to come out of reset */ 4449 for (i = 0; i < adev->usec_timeout; i++) { 4450 u32 memsize = adev->nbio.funcs->get_memsize(adev); 4451 4452 if (memsize != 0xffffffff) 4453 break; 4454 udelay(1); 4455 } 4456 4457 amdgpu_atombios_scratch_regs_engine_hung(adev, false); 4458 return ret; 4459 } 4460 4461 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, 4462 struct amdgpu_reset_context *reset_context) 4463 { 4464 int i, j, r = 0; 4465 struct amdgpu_job *job = NULL; 4466 bool need_full_reset = 4467 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4468 4469 if (reset_context->reset_req_dev == adev) 4470 job = reset_context->job; 4471 4472 /* no need to dump if device is not in good state during probe period */ 4473 if (!adev->gmc.xgmi.pending_reset) 4474 amdgpu_debugfs_wait_dump(adev); 4475 4476 if (amdgpu_sriov_vf(adev)) { 4477 /* stop the data exchange thread */ 4478 amdgpu_virt_fini_data_exchange(adev); 4479 } 4480 4481 /* block all schedulers and reset given job's ring */ 4482 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 4483 struct amdgpu_ring *ring = adev->rings[i]; 4484 4485 if (!ring || !ring->sched.thread) 4486 continue; 4487 4488 
/* Clear job fences from the fence driver to avoid force_completion on
4489 * them; leave only the NULL and VM flush fences in the fence driver. */
4490 for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
4491 struct dma_fence *old, **ptr;
4492
4493 ptr = &ring->fence_drv.fences[j];
4494 old = rcu_dereference_protected(*ptr, 1);
4495 if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
4496 RCU_INIT_POINTER(*ptr, NULL);
4497 }
4498 }
4499 /* after all hw jobs are reset, the hw fences are meaningless, so force_completion */
4500 amdgpu_fence_driver_force_completion(ring);
4501 }
4502
4503 if (job && job->vm)
4504 drm_sched_increase_karma(&job->base);
4505
4506 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4507 /* If the reset handler is not implemented, continue; otherwise return */
4508 if (r == -ENOSYS)
4509 r = 0;
4510 else
4511 return r;
4512
4513 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4514 if (!amdgpu_sriov_vf(adev)) {
4515
4516 if (!need_full_reset)
4517 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4518
4519 if (!need_full_reset) {
4520 amdgpu_device_ip_pre_soft_reset(adev);
4521 r = amdgpu_device_ip_soft_reset(adev);
4522 amdgpu_device_ip_post_soft_reset(adev);
4523 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4524 dev_info(adev->dev, "soft reset failed, falling back to full reset!\n");
4525 need_full_reset = true;
4526 }
4527 }
4528
4529 if (need_full_reset) {
4530 r = amdgpu_device_ip_suspend(adev);
4531 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4532 } else {
4533 clear_bit(AMDGPU_NEED_FULL_RESET,
4534 &reset_context->flags);
4535 }
4536 }
4537
4538 return r;
4539 }
4540
4541 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4542 struct amdgpu_reset_context *reset_context)
4543 {
4544 struct amdgpu_device *tmp_adev = NULL;
4545 bool need_full_reset, skip_hw_reset, vram_lost = false;
4546 int r = 0;
4547
4548 /* Try reset handler method first */
4549 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4550 reset_list);
4551 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4552 /* If the reset handler is not implemented, continue; otherwise return */
4553 if (r == -ENOSYS)
4554 r = 0;
4555 else
4556 return r;
4557
4558 /* Reset handler not implemented, use the default method */
4559 need_full_reset =
4560 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4561 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4562
4563 /*
4564 * ASIC reset has to be done on all XGMI hive nodes ASAP
4565 * to allow proper link negotiation in the FW (within 1 sec)
4566 */
4567 if (!skip_hw_reset && need_full_reset) {
4568 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4569 /* For XGMI run all resets in parallel to speed up the process */
4570 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4571 tmp_adev->gmc.xgmi.pending_reset = false;
4572 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4573 r = -EALREADY;
4574 } else
4575 r = amdgpu_asic_reset(tmp_adev);
4576
4577 if (r) {
4578 dev_err(tmp_adev->dev, "ASIC reset failed with error %d for drm dev %s",
4579 r, adev_to_drm(tmp_adev)->unique);
4580 break;
4581 }
4582 }
4583
4584 /* For XGMI wait for all resets to complete before proceeding */
4585 if (!r) {
4586 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4587 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4588 flush_work(&tmp_adev->xgmi_reset_work);
4589 r = tmp_adev->asic_reset_res;
4590 if (r)
4591 break;
4592 }
4593 }
4594 }
4595 }
4596
4597 if (!r && amdgpu_ras_intr_triggered()) {
4598 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4599 if (tmp_adev->mmhub.ras_funcs &&
4600 tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
4601 tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
4602 }
4603
4604 amdgpu_ras_intr_cleared();
4605 }
4606
4607 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4608 if (need_full_reset) {
4609 /* post card */
4610 r = amdgpu_device_asic_init(tmp_adev);
4611 if (r) {
4612 dev_warn(tmp_adev->dev, "asic atom init failed!");
4613 } else {
4614 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4615 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4616 if (r)
4617 goto out;
4618
4619 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4620 if (r)
4621 goto out;
4622
4623 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4624 if (vram_lost) {
4625 DRM_INFO("VRAM is lost due to GPU reset!\n");
4626 amdgpu_inc_vram_lost(tmp_adev);
4627 }
4628
4629 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4630 if (r)
4631 goto out;
4632
4633 r = amdgpu_device_fw_loading(tmp_adev);
4634 if (r)
4635 return r;
4636
4637 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4638 if (r)
4639 goto out;
4640
4641 if (vram_lost)
4642 amdgpu_device_fill_reset_magic(tmp_adev);
4643
4644 /*
4645 * Add this ASIC back as tracked, since its reset has
4646 * already completed successfully.
4647 */
4648 amdgpu_register_gpu_instance(tmp_adev);
4649
4650 if (!reset_context->hive &&
4651 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4652 amdgpu_xgmi_add_device(tmp_adev);
4653
4654 r = amdgpu_device_ip_late_init(tmp_adev);
4655 if (r)
4656 goto out;
4657
4658 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4659
4660 /*
4661 * The GPU enters a bad state once the number of faulty
4662 * pages reported by ECC reaches the threshold, and RAS
4663 * recovery is scheduled next. So add one check here to
4664 * break out of recovery if the bad page threshold has
4665 * indeed been exceeded, and remind the user to retire
4666 * this GPU or to set a bigger bad_page_threshold
4667 * value so that a later probe of the driver can
4668 * succeed.
4669 */
4670 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4671 /* must succeed.
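 * RAS was suspended at the start of recovery and the bad page
 * threshold check above has passed, so resuming it here is
 * expected to work.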
*/
4672 amdgpu_ras_resume(tmp_adev);
4673 } else {
4674 r = -EINVAL;
4675 goto out;
4676 }
4677
4678 /* Update PSP FW topology after reset */
4679 if (reset_context->hive &&
4680 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4681 r = amdgpu_xgmi_update_topology(
4682 reset_context->hive, tmp_adev);
4683 }
4684 }
4685
4686 out:
4687 if (!r) {
4688 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4689 r = amdgpu_ib_ring_tests(tmp_adev);
4690 if (r) {
4691 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4692 need_full_reset = true;
4693 r = -EAGAIN;
4694 goto end;
4695 }
4696 }
4697
4698 if (!r)
4699 r = amdgpu_device_recover_vram(tmp_adev);
4700 else
4701 tmp_adev->asic_reset_res = r;
4702 }
4703
4704 end:
4705 if (need_full_reset)
4706 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4707 else
4708 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4709 return r;
4710 }
4711
4712 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4713 struct amdgpu_hive_info *hive)
4714 {
4715 if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4716 return false;
4717
4718 if (hive) {
4719 down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4720 } else {
4721 down_write(&adev->reset_sem);
4722 }
4723
4724 switch (amdgpu_asic_reset_method(adev)) {
4725 case AMD_RESET_METHOD_MODE1:
4726 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4727 break;
4728 case AMD_RESET_METHOD_MODE2:
4729 adev->mp1_state = PP_MP1_STATE_RESET;
4730 break;
4731 default:
4732 adev->mp1_state = PP_MP1_STATE_NONE;
4733 break;
4734 }
4735
4736 return true;
4737 }
4738
4739 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4740 {
4741 amdgpu_vf_error_trans_all(adev);
4742 adev->mp1_state = PP_MP1_STATE_NONE;
4743 atomic_set(&adev->in_gpu_reset, 0);
4744 up_write(&adev->reset_sem);
4745 }
4746
4747 /*
4748 * Lock a list of amdgpu devices in a hive safely. If this is not a hive
4749 * with multiple nodes, it behaves just like amdgpu_device_lock_adev.
4750 *
4751 * Unlocking won't require a roll back.
4752 */
4753 static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4754 {
4755 struct amdgpu_device *tmp_adev = NULL;
4756
4757 if (adev->gmc.xgmi.num_physical_nodes > 1) {
4758 if (!hive) {
4759 dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4760 return -ENODEV;
4761 }
4762 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4763 if (!amdgpu_device_lock_adev(tmp_adev, hive))
4764 goto roll_back;
4765 }
4766 } else if (!amdgpu_device_lock_adev(adev, hive))
4767 return -EAGAIN;
4768
4769 return 0;
4770 roll_back:
4771 if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4772 /*
4773 * If the locking iteration broke off in the middle of a hive,
4774 * it may mean there is a race with another reset, or that a
4775 * hive device locked up independently. We may or may not be
4776 * in trouble, so roll back the locks already taken and give
4777 * out a warning.
4778 */
4779 dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle.
Rolling back to unlock");
4780 list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4781 amdgpu_device_unlock_adev(tmp_adev);
4782 }
4783 }
4784 return -EAGAIN;
4785 }
4786
4787 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4788 {
4789 struct pci_dev *p = NULL;
4790
4791 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4792 adev->pdev->bus->number, 1);
4793 if (p) {
4794 pm_runtime_enable(&(p->dev));
4795 pm_runtime_resume(&(p->dev));
4796 }
4797 }
4798
4799 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4800 {
4801 enum amd_reset_method reset_method;
4802 struct pci_dev *p = NULL;
4803 u64 expires;
4804
4805 /*
4806 * For now, only BACO and mode1 reset are confirmed to suffer
4807 * the audio issue if the codec is not properly suspended.
4808 */
4809 reset_method = amdgpu_asic_reset_method(adev);
4810 if ((reset_method != AMD_RESET_METHOD_BACO) &&
4811 (reset_method != AMD_RESET_METHOD_MODE1))
4812 return -EINVAL;
4813
4814 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4815 adev->pdev->bus->number, 1);
4816 if (!p)
4817 return -ENODEV;
4818
4819 expires = pm_runtime_autosuspend_expiration(&(p->dev));
4820 if (!expires)
4821 /*
4822 * If we cannot get the audio device autosuspend delay,
4823 * a fixed 4s interval is used. Since 3s is
4824 * the audio controller's default autosuspend delay setting,
4825 * the 4s used here is guaranteed to cover it.
4826 */
4827 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4828
4829 while (!pm_runtime_status_suspended(&(p->dev))) {
4830 if (!pm_runtime_suspend(&(p->dev)))
4831 break;
4832
4833 if (expires < ktime_get_mono_fast_ns()) {
4834 dev_warn(adev->dev, "failed to suspend display audio\n");
4835 /* TODO: abort the gpu reset that follows? */
4836 return -ETIMEDOUT;
4837 }
4838 }
4839
4840 pm_runtime_disable(&(p->dev));
4841
4842 return 0;
4843 }
4844
4845 static void amdgpu_device_recheck_guilty_jobs(
4846 struct amdgpu_device *adev, struct list_head *device_list_handle,
4847 struct amdgpu_reset_context *reset_context)
4848 {
4849 int i, r = 0;
4850
4851 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4852 struct amdgpu_ring *ring = adev->rings[i];
4853 int ret = 0;
4854 struct drm_sched_job *s_job;
4855
4856 if (!ring || !ring->sched.thread)
4857 continue;
4858
4859 s_job = list_first_entry_or_null(&ring->sched.pending_list,
4860 struct drm_sched_job, list);
4861 if (s_job == NULL)
4862 continue;
4863
4864 /* clear the job's guilty flag and rely on the following step to decide the real one */
4865 drm_sched_reset_karma(s_job);
4866 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4867
4868 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
4869 if (ret == 0) { /* timeout */
4870 DRM_ERROR("Found the real bad job!
ring:%s, job_id:%llx\n",
4871 ring->sched.name, s_job->id);
4872
4873 /* set guilty */
4874 drm_sched_increase_karma(s_job);
4875 retry:
4876 /* do hw reset */
4877 if (amdgpu_sriov_vf(adev)) {
4878 amdgpu_virt_fini_data_exchange(adev);
4879 r = amdgpu_device_reset_sriov(adev, false);
4880 if (r)
4881 adev->asic_reset_res = r;
4882 } else {
4883 clear_bit(AMDGPU_SKIP_HW_RESET,
4884 &reset_context->flags);
4885 r = amdgpu_do_asic_reset(device_list_handle,
4886 reset_context);
4887 if (r && r == -EAGAIN)
4888 goto retry;
4889 }
4890
4891 /*
4892 * add the reset counter so that the following
4893 * resubmitted jobs can flush their VMIDs
4894 */
4895 atomic_inc(&adev->gpu_reset_counter);
4896 continue;
4897 }
4898
4899 /* got the hw fence, signal finished fence */
4900 atomic_dec(ring->sched.score);
4901 dma_fence_get(&s_job->s_fence->finished);
4902 dma_fence_signal(&s_job->s_fence->finished);
4903 dma_fence_put(&s_job->s_fence->finished);
4904
4905 /* remove node from list and free the job */
4906 spin_lock(&ring->sched.job_list_lock);
4907 list_del_init(&s_job->list);
4908 spin_unlock(&ring->sched.job_list_lock);
4909 ring->sched.ops->free_job(s_job);
4910 }
4911 }
4912
4913 /**
4914 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4915 *
4916 * @adev: amdgpu_device pointer
4917 * @job: the job which triggered the hang
4918 *
4919 * Attempt to reset the GPU if it has hung (all asics).
4920 * Attempts a soft reset or a full reset and reinitializes the asic.
4921 * Returns 0 for success or an error on failure.
4922 */
4923
4924 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4925 struct amdgpu_job *job)
4926 {
4927 struct list_head device_list, *device_list_handle = NULL;
4928 bool job_signaled = false;
4929 struct amdgpu_hive_info *hive = NULL;
4930 struct amdgpu_device *tmp_adev = NULL;
4931 int i, r = 0;
4932 bool need_emergency_restart = false;
4933 bool audio_suspended = false;
4934 int tmp_vram_lost_counter;
4935 struct amdgpu_reset_context reset_context;
4936
4937 memset(&reset_context, 0, sizeof(reset_context));
4938
4939 /*
4940 * Special case: RAS triggered and full reset isn't supported
4941 */
4942 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4943
4944 /*
4945 * Flush RAM to disk so that after reboot
4946 * the user can read the log and see why the system rebooted.
4947 */
4948 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4949 DRM_WARN("Emergency reboot.");
4950
4951 ksys_sync_helper();
4952 emergency_restart();
4953 }
4954
4955 dev_info(adev->dev, "GPU %s begin!\n",
4956 need_emergency_restart ? "jobs stop":"reset");
4957
4958 /*
4959 * Here we trylock to avoid a chain of resets executing, triggered
4960 * either by jobs on different adevs in an XGMI hive or by jobs on
4961 * different schedulers for the same device, while this TO handler
4962 * is running. We always reset all schedulers for a device and all
4963 * devices in an XGMI hive, so that should take care of them too.
4964 */
4965 hive = amdgpu_get_xgmi_hive(adev);
4966 if (hive) {
4967 if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4968 DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4969 job ?
job->base.id : -1, hive->hive_id);
4970 amdgpu_put_xgmi_hive(hive);
4971 if (job && job->vm)
4972 drm_sched_increase_karma(&job->base);
4973 return 0;
4974 }
4975 mutex_lock(&hive->hive_lock);
4976 }
4977
4978 reset_context.method = AMD_RESET_METHOD_NONE;
4979 reset_context.reset_req_dev = adev;
4980 reset_context.job = job;
4981 reset_context.hive = hive;
4982 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
4983
4984 /*
4985 * Lock the device before we try to operate on the linked list;
4986 * if we didn't get the device lock, don't touch the linked list
4987 * since others may be iterating over it.
4988 */
4989 r = amdgpu_device_lock_hive_adev(adev, hive);
4990 if (r) {
4991 dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4992 job ? job->base.id : -1);
4993
4994 /* even though we skipped this reset, we still need to mark the job guilty */
4995 if (job && job->vm)
4996 drm_sched_increase_karma(&job->base);
4997 goto skip_recovery;
4998 }
4999
5000 /*
5001 * Build the list of devices to reset.
5002 * In case we are in XGMI hive mode, reorder the device list
5003 * to put adev in the 1st position.
5004 */
5005 INIT_LIST_HEAD(&device_list);
5006 if (adev->gmc.xgmi.num_physical_nodes > 1) {
5007 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5008 list_add_tail(&tmp_adev->reset_list, &device_list);
5009 if (!list_is_first(&adev->reset_list, &device_list))
5010 list_rotate_to_front(&adev->reset_list, &device_list);
5011 device_list_handle = &device_list;
5012 } else {
5013 list_add_tail(&adev->reset_list, &device_list);
5014 device_list_handle = &device_list;
5015 }
5016
5017 /* block all schedulers and reset given job's ring */
5018 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5019 /*
5020 * Try to put the audio codec into the suspend state
5021 * before the gpu reset starts.
5022 *
5023 * The power domain of the graphics device is shared
5024 * with the AZ power domain. Without this, we may
5025 * change the audio hardware from behind
5026 * the audio driver's back. That would trigger
5027 * some audio codec errors.
5028 */
5029 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5030 audio_suspended = true;
5031
5032 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5033
5034 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5035
5036 if (!amdgpu_sriov_vf(tmp_adev))
5037 amdgpu_amdkfd_pre_reset(tmp_adev);
5038
5039 /*
5040 * First mark these ASICs, which are about to be reset,
5041 * as untracked, and add them back after the reset completes.
5042 */
5043 amdgpu_unregister_gpu_instance(tmp_adev);
5044
5045 amdgpu_fbdev_set_suspend(tmp_adev, 1);
5046
5047 /* disable ras on ALL IPs */
5048 if (!need_emergency_restart &&
5049 amdgpu_device_ip_need_full_reset(tmp_adev))
5050 amdgpu_ras_suspend(tmp_adev);
5051
5052 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5053 struct amdgpu_ring *ring = tmp_adev->rings[i];
5054
5055 if (!ring || !ring->sched.thread)
5056 continue;
5057
5058 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5059
5060 if (need_emergency_restart)
5061 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5062 }
5063 atomic_inc(&tmp_adev->gpu_reset_counter);
5064 }
5065
5066 if (need_emergency_restart)
5067 goto skip_sched_resume;
5068
5069 /*
5070 * Must check the guilty signal here since after this point all old
5071 * HW fences are force signaled.
5072 *
5073 * job->base holds a reference to the parent fence
5074 */
5075 if (job && job->base.s_fence->parent &&
5076 dma_fence_is_signaled(job->base.s_fence->parent)) {
5077 job_signaled = true;
5078 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5079 goto skip_hw_reset;
5080 }
5081
5082 retry: /* Rest of adevs pre asic reset from XGMI hive. */
5083 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5084 r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
5085 /* TODO: should we stop here? */
5086 if (r) {
5087 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s",
5088 r, adev_to_drm(tmp_adev)->unique);
5089 tmp_adev->asic_reset_res = r;
5090 }
5091 }
5092
5093 tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5094 /* Actual ASIC resets if needed. */
5095 /* TODO: implement XGMI hive reset logic for SRIOV */
5096 if (amdgpu_sriov_vf(adev)) {
5097 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5098 if (r)
5099 adev->asic_reset_res = r;
5100 } else {
5101 r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
5102 if (r && r == -EAGAIN)
5103 goto retry;
5104 }
5105
5106 skip_hw_reset:
5107
5108 /* Post ASIC reset for all devs. */
5109 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5110
5111 /*
5112 * Sometimes a later bad compute job can block a good gfx job, as the
5113 * gfx and compute rings share internal GC HW mutually. We add an
5114 * additional guilty-jobs recheck step to find the real guilty job; it
5115 * synchronously submits the first pending job and waits for it to be
5116 * signaled. If that wait times out, we identify it as the real guilty job.
5117 */
5118 if (amdgpu_gpu_recovery == 2 &&
5119 !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5120 amdgpu_device_recheck_guilty_jobs(
5121 tmp_adev, device_list_handle, &reset_context);
5122
5123 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5124 struct amdgpu_ring *ring = tmp_adev->rings[i];
5125
5126 if (!ring || !ring->sched.thread)
5127 continue;
5128
5129 /* No point in resubmitting jobs if we didn't HW reset */
5130 if (!tmp_adev->asic_reset_res && !job_signaled)
5131 drm_sched_resubmit_jobs(&ring->sched);
5132
5133 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5134 }
5135
5136 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
5137 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5138 }
5139
5140 tmp_adev->asic_reset_res = 0;
5141
5142 if (r) {
5143 /* bad news, how do we tell it to userspace?
 * For now we just log it; amdgpu_vf_error_put() below also
 * records it for the SR-IOV host.
*/
5144 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5145 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5146 } else {
5147 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5148 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5149 DRM_WARN("smart shift update failed\n");
5150 }
5151 }
5152
5153 skip_sched_resume:
5154 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5155 /* unlock kfd: SRIOV would do it separately */
5156 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5157 amdgpu_amdkfd_post_reset(tmp_adev);
5158
5159 /* kfd_post_reset will do nothing if the kfd device is not initialized,
5160 * so bring up kfd here if it wasn't initialized before
5161 */
5162 if (!adev->kfd.init_complete)
5163 amdgpu_amdkfd_device_init(adev);
5164
5165 if (audio_suspended)
5166 amdgpu_device_resume_display_audio(tmp_adev);
5167 amdgpu_device_unlock_adev(tmp_adev);
5168 }
5169
5170 skip_recovery:
5171 if (hive) {
5172 atomic_set(&hive->in_reset, 0);
5173 mutex_unlock(&hive->hive_lock);
5174 amdgpu_put_xgmi_hive(hive);
5175 }
5176
5177 if (r && r != -EAGAIN)
5178 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5179 return r;
5180 }
5181
5182 /**
5183 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
5184 *
5185 * @adev: amdgpu_device pointer
5186 *
5187 * Fetches and stores in the driver the PCIe capabilities (gen speed
5188 * and lanes) of the slot the device is in. Handles APUs and
5189 * virtualized environments where PCIe config space may not be available.
5190 */
5191 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5192 {
5193 struct pci_dev *pdev;
5194 enum pci_bus_speed speed_cap, platform_speed_cap;
5195 enum pcie_link_width platform_link_width;
5196
5197 if (amdgpu_pcie_gen_cap)
5198 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5199
5200 if (amdgpu_pcie_lane_cap)
5201 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5202
5203 /* covers APUs as well */
5204 if (pci_is_root_bus(adev->pdev->bus)) {
5205 if (adev->pm.pcie_gen_mask == 0)
5206 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5207 if (adev->pm.pcie_mlw_mask == 0)
5208 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5209 return;
5210 }
5211
5212 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5213 return;
5214
5215 pcie_bandwidth_available(adev->pdev, NULL,
5216 &platform_speed_cap, &platform_link_width);
5217
5218 if (adev->pm.pcie_gen_mask == 0) {
5219 /* asic caps */
5220 pdev = adev->pdev;
5221 speed_cap = pcie_get_speed_cap(pdev);
5222 if (speed_cap == PCI_SPEED_UNKNOWN) {
5223 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5224 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5225 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5226 } else {
5227 if (speed_cap == PCIE_SPEED_32_0GT)
5228 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5229 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5230 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5231 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5232 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5233 else if (speed_cap == PCIE_SPEED_16_0GT)
5234 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5235 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5236 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5237 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5238 else if (speed_cap == PCIE_SPEED_8_0GT)
5239 adev->pm.pcie_gen_mask |=
(CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5240 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5241 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 5242 else if (speed_cap == PCIE_SPEED_5_0GT) 5243 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5244 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2); 5245 else 5246 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1; 5247 } 5248 /* platform caps */ 5249 if (platform_speed_cap == PCI_SPEED_UNKNOWN) { 5250 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5251 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2); 5252 } else { 5253 if (platform_speed_cap == PCIE_SPEED_32_0GT) 5254 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5255 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5256 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 | 5257 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 | 5258 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5); 5259 else if (platform_speed_cap == PCIE_SPEED_16_0GT) 5260 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5261 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5262 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 | 5263 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4); 5264 else if (platform_speed_cap == PCIE_SPEED_8_0GT) 5265 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5266 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5267 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3); 5268 else if (platform_speed_cap == PCIE_SPEED_5_0GT) 5269 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5270 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2); 5271 else 5272 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; 5273 5274 } 5275 } 5276 if (adev->pm.pcie_mlw_mask == 0) { 5277 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) { 5278 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK; 5279 } else { 5280 switch (platform_link_width) { 5281 case PCIE_LNK_X32: 5282 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | 5283 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 5284 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 5285 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 5286 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5287 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5288 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5289 break; 5290 case PCIE_LNK_X16: 5291 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 5292 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 5293 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 5294 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5295 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5296 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5297 break; 5298 case PCIE_LNK_X12: 5299 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 5300 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 5301 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5302 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5303 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5304 break; 5305 case PCIE_LNK_X8: 5306 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 5307 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5308 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5309 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5310 break; 5311 case PCIE_LNK_X4: 5312 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5313 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5314 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5315 break; 5316 case PCIE_LNK_X2: 5317 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5318 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5319 break; 5320 case PCIE_LNK_X1: 5321 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; 5322 break; 5323 default: 5324 break; 5325 } 5326 } 5327 } 5328 } 5329 5330 int amdgpu_device_baco_enter(struct drm_device *dev) 5331 { 5332 struct amdgpu_device *adev = drm_to_adev(dev); 5333 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 5334 
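	/*
	 * BACO ("Bus Active, Chip Off") powers the chip down while keeping
	 * the PCIe link active. Doorbell interrupts are disabled across
	 * BACO entry (and re-enabled on exit) so that RAS doorbell handling
	 * cannot fire while the chip is off.
	 */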
5335 if (!amdgpu_device_supports_baco(adev_to_drm(adev))) 5336 return -ENOTSUPP; 5337 5338 if (ras && adev->ras_enabled && 5339 adev->nbio.funcs->enable_doorbell_interrupt) 5340 adev->nbio.funcs->enable_doorbell_interrupt(adev, false); 5341 5342 return amdgpu_dpm_baco_enter(adev); 5343 } 5344 5345 int amdgpu_device_baco_exit(struct drm_device *dev) 5346 { 5347 struct amdgpu_device *adev = drm_to_adev(dev); 5348 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 5349 int ret = 0; 5350 5351 if (!amdgpu_device_supports_baco(adev_to_drm(adev))) 5352 return -ENOTSUPP; 5353 5354 ret = amdgpu_dpm_baco_exit(adev); 5355 if (ret) 5356 return ret; 5357 5358 if (ras && adev->ras_enabled && 5359 adev->nbio.funcs->enable_doorbell_interrupt) 5360 adev->nbio.funcs->enable_doorbell_interrupt(adev, true); 5361 5362 if (amdgpu_passthrough(adev) && 5363 adev->nbio.funcs->clear_doorbell_interrupt) 5364 adev->nbio.funcs->clear_doorbell_interrupt(adev); 5365 5366 return 0; 5367 } 5368 5369 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev) 5370 { 5371 int i; 5372 5373 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5374 struct amdgpu_ring *ring = adev->rings[i]; 5375 5376 if (!ring || !ring->sched.thread) 5377 continue; 5378 5379 cancel_delayed_work_sync(&ring->sched.work_tdr); 5380 } 5381 } 5382 5383 /** 5384 * amdgpu_pci_error_detected - Called when a PCI error is detected. 5385 * @pdev: PCI device struct 5386 * @state: PCI channel state 5387 * 5388 * Description: Called when a PCI error is detected. 5389 * 5390 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT. 5391 */ 5392 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 5393 { 5394 struct drm_device *dev = pci_get_drvdata(pdev); 5395 struct amdgpu_device *adev = drm_to_adev(dev); 5396 int i; 5397 5398 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state); 5399 5400 if (adev->gmc.xgmi.num_physical_nodes > 1) { 5401 DRM_WARN("No support for XGMI hive yet..."); 5402 return PCI_ERS_RESULT_DISCONNECT; 5403 } 5404 5405 switch (state) { 5406 case pci_channel_io_normal: 5407 return PCI_ERS_RESULT_CAN_RECOVER; 5408 /* Fatal error, prepare for slot reset */ 5409 case pci_channel_io_frozen: 5410 /* 5411 * Cancel and wait for all TDRs in progress if failing to 5412 * set adev->in_gpu_reset in amdgpu_device_lock_adev 5413 * 5414 * Locking adev->reset_sem will prevent any external access 5415 * to GPU during PCI error recovery 5416 */ 5417 while (!amdgpu_device_lock_adev(adev, NULL)) 5418 amdgpu_cancel_all_tdr(adev); 5419 5420 /* 5421 * Block any work scheduling as we do for regular GPU reset 5422 * for the duration of the recovery 5423 */ 5424 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5425 struct amdgpu_ring *ring = adev->rings[i]; 5426 5427 if (!ring || !ring->sched.thread) 5428 continue; 5429 5430 drm_sched_stop(&ring->sched, NULL); 5431 } 5432 atomic_inc(&adev->gpu_reset_counter); 5433 return PCI_ERS_RESULT_NEED_RESET; 5434 case pci_channel_io_perm_failure: 5435 /* Permanent error, prepare for device removal */ 5436 return PCI_ERS_RESULT_DISCONNECT; 5437 } 5438 5439 return PCI_ERS_RESULT_NEED_RESET; 5440 } 5441 5442 /** 5443 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers 5444 * @pdev: pointer to PCI device 5445 */ 5446 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev) 5447 { 5448 5449 DRM_INFO("PCI error: mmio enabled callback!!\n"); 5450 5451 /* TODO - dump whatever for debugging purposes */ 5452 5453 /* This called only if amdgpu_pci_error_detected returns 5454 
* PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5455 * works, no need to reset the slot.
5456 */
5457
5458 return PCI_ERS_RESULT_RECOVERED;
5459 }
5460
5461 /**
5462 * amdgpu_pci_slot_reset - Called when the PCI slot has been reset.
5463 * @pdev: PCI device struct
5464 *
5465 * Description: This routine is called by the pci error recovery
5466 * code after the PCI slot has been reset, just before we
5467 * should resume normal operations.
5468 */
5469 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5470 {
5471 struct drm_device *dev = pci_get_drvdata(pdev);
5472 struct amdgpu_device *adev = drm_to_adev(dev);
5473 int r, i;
5474 struct amdgpu_reset_context reset_context;
5475 u32 memsize;
5476 struct list_head device_list;
5477
5478 DRM_INFO("PCI error: slot reset callback!!\n");
5479
5480 memset(&reset_context, 0, sizeof(reset_context));
5481
5482 INIT_LIST_HEAD(&device_list);
5483 list_add_tail(&adev->reset_list, &device_list);
5484
5485 /* wait for asic to come out of reset */
5486 msleep(500);
5487
5488 /* Restore PCI config space */
5489 amdgpu_device_load_pci_state(pdev);
5490
5491 /* confirm ASIC came out of reset */
5492 for (i = 0; i < adev->usec_timeout; i++) {
5493 memsize = amdgpu_asic_get_config_memsize(adev);
5494
5495 if (memsize != 0xffffffff)
5496 break;
5497 udelay(1);
5498 }
5499 if (memsize == 0xffffffff) {
5500 r = -ETIME;
5501 goto out;
5502 }
5503
5504 reset_context.method = AMD_RESET_METHOD_NONE;
5505 reset_context.reset_req_dev = adev;
5506 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5507 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5508
5509 adev->no_hw_access = true;
5510 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5511 adev->no_hw_access = false;
5512 if (r)
5513 goto out;
5514
5515 r = amdgpu_do_asic_reset(&device_list, &reset_context);
5516
5517 out:
5518 if (!r) {
5519 if (amdgpu_device_cache_pci_state(adev->pdev))
5520 pci_restore_state(adev->pdev);
5521
5522 DRM_INFO("PCIe error recovery succeeded\n");
5523 } else {
5524 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5525 amdgpu_device_unlock_adev(adev);
5526 }
5527
5528 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5529 }
5530
5531 /**
5532 * amdgpu_pci_resume() - resume normal ops after PCI reset
5533 * @pdev: pointer to PCI device
5534 *
5535 * Called when the error recovery driver tells us that it's
5536 * OK to resume normal operation.
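 * Restarts the schedulers that were stopped in
 * amdgpu_pci_error_detected() and drops the reset lock taken there.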
5537 */ 5538 void amdgpu_pci_resume(struct pci_dev *pdev) 5539 { 5540 struct drm_device *dev = pci_get_drvdata(pdev); 5541 struct amdgpu_device *adev = drm_to_adev(dev); 5542 int i; 5543 5544 5545 DRM_INFO("PCI error: resume callback!!\n"); 5546 5547 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5548 struct amdgpu_ring *ring = adev->rings[i]; 5549 5550 if (!ring || !ring->sched.thread) 5551 continue; 5552 5553 5554 drm_sched_resubmit_jobs(&ring->sched); 5555 drm_sched_start(&ring->sched, true); 5556 } 5557 5558 amdgpu_device_unlock_adev(adev); 5559 } 5560 5561 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev) 5562 { 5563 struct drm_device *dev = pci_get_drvdata(pdev); 5564 struct amdgpu_device *adev = drm_to_adev(dev); 5565 int r; 5566 5567 r = pci_save_state(pdev); 5568 if (!r) { 5569 kfree(adev->pci_state); 5570 5571 adev->pci_state = pci_store_saved_state(pdev); 5572 5573 if (!adev->pci_state) { 5574 DRM_ERROR("Failed to store PCI saved state"); 5575 return false; 5576 } 5577 } else { 5578 DRM_WARN("Failed to save PCI state, err:%d\n", r); 5579 return false; 5580 } 5581 5582 return true; 5583 } 5584 5585 bool amdgpu_device_load_pci_state(struct pci_dev *pdev) 5586 { 5587 struct drm_device *dev = pci_get_drvdata(pdev); 5588 struct amdgpu_device *adev = drm_to_adev(dev); 5589 int r; 5590 5591 if (!adev->pci_state) 5592 return false; 5593 5594 r = pci_load_saved_state(pdev, adev->pci_state); 5595 5596 if (!r) { 5597 pci_restore_state(pdev); 5598 } else { 5599 DRM_WARN("Failed to load PCI state, err:%d\n", r); 5600 return false; 5601 } 5602 5603 return true; 5604 } 5605 5606 void amdgpu_device_flush_hdp(struct amdgpu_device *adev, 5607 struct amdgpu_ring *ring) 5608 { 5609 #ifdef CONFIG_X86_64 5610 if (adev->flags & AMD_IS_APU) 5611 return; 5612 #endif 5613 if (adev->gmc.xgmi.connected_to_cpu) 5614 return; 5615 5616 if (ring && ring->funcs->emit_hdp_flush) 5617 amdgpu_ring_emit_hdp_flush(ring); 5618 else 5619 amdgpu_asic_flush_hdp(adev, ring); 5620 } 5621 5622 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, 5623 struct amdgpu_ring *ring) 5624 { 5625 #ifdef CONFIG_X86_64 5626 if (adev->flags & AMD_IS_APU) 5627 return; 5628 #endif 5629 if (adev->gmc.xgmi.connected_to_cpu) 5630 return; 5631 5632 amdgpu_asic_invalidate_hdp(adev, ring); 5633 } 5634