1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #include <linux/kthread.h> 29 #include <linux/console.h> 30 #include <linux/slab.h> 31 #include <linux/debugfs.h> 32 #include <drm/drmP.h> 33 #include <drm/drm_crtc_helper.h> 34 #include <drm/drm_atomic_helper.h> 35 #include <drm/amdgpu_drm.h> 36 #include <linux/vgaarb.h> 37 #include <linux/vga_switcheroo.h> 38 #include <linux/efi.h> 39 #include "amdgpu.h" 40 #include "amdgpu_trace.h" 41 #include "amdgpu_i2c.h" 42 #include "atom.h" 43 #include "amdgpu_atombios.h" 44 #include "amdgpu_atomfirmware.h" 45 #include "amd_pcie.h" 46 #ifdef CONFIG_DRM_AMDGPU_SI 47 #include "si.h" 48 #endif 49 #ifdef CONFIG_DRM_AMDGPU_CIK 50 #include "cik.h" 51 #endif 52 #include "vi.h" 53 #include "soc15.h" 54 #include "bif/bif_4_1_d.h" 55 #include <linux/pci.h> 56 #include <linux/firmware.h> 57 #include "amdgpu_vf_error.h" 58 59 #include "amdgpu_amdkfd.h" 60 61 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); 62 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); 63 64 #define AMDGPU_RESUME_MS 2000 65 66 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); 67 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); 68 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev); 69 static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev); 70 71 static const char *amdgpu_asic_name[] = { 72 "TAHITI", 73 "PITCAIRN", 74 "VERDE", 75 "OLAND", 76 "HAINAN", 77 "BONAIRE", 78 "KAVERI", 79 "KABINI", 80 "HAWAII", 81 "MULLINS", 82 "TOPAZ", 83 "TONGA", 84 "FIJI", 85 "CARRIZO", 86 "STONEY", 87 "POLARIS10", 88 "POLARIS11", 89 "POLARIS12", 90 "VEGA10", 91 "RAVEN", 92 "LAST", 93 }; 94 95 bool amdgpu_device_is_px(struct drm_device *dev) 96 { 97 struct amdgpu_device *adev = dev->dev_private; 98 99 if (adev->flags & AMD_IS_PX) 100 return true; 101 return false; 102 } 103 104 /* 105 * MMIO register access helper functions. 
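 *
 * Note: most driver code does not call these helpers directly but goes
 * through the register accessor macros in amdgpu.h.  Roughly (illustrative
 * sketch, see amdgpu.h for the exact definitions):
 *
 *   val = RREG32(reg);      // wraps amdgpu_mm_rreg(adev, reg, 0)
 *   WREG32(reg, val);       // wraps amdgpu_mm_wreg(adev, reg, val, 0)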
106 */ 107 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, 108 uint32_t acc_flags) 109 { 110 uint32_t ret; 111 112 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) { 113 BUG_ON(in_interrupt()); 114 return amdgpu_virt_kiq_rreg(adev, reg); 115 } 116 117 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) 118 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); 119 else { 120 unsigned long flags; 121 122 spin_lock_irqsave(&adev->mmio_idx_lock, flags); 123 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); 124 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); 125 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); 126 } 127 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret); 128 return ret; 129 } 130 131 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, 132 uint32_t acc_flags) 133 { 134 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); 135 136 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { 137 adev->last_mm_index = v; 138 } 139 140 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) { 141 BUG_ON(in_interrupt()); 142 return amdgpu_virt_kiq_wreg(adev, reg, v); 143 } 144 145 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) 146 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); 147 else { 148 unsigned long flags; 149 150 spin_lock_irqsave(&adev->mmio_idx_lock, flags); 151 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); 152 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); 153 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); 154 } 155 156 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { 157 udelay(500); 158 } 159 } 160 161 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) 162 { 163 if ((reg * 4) < adev->rio_mem_size) 164 return ioread32(adev->rio_mem + (reg * 4)); 165 else { 166 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); 167 return ioread32(adev->rio_mem + (mmMM_DATA * 4)); 168 } 169 } 170 171 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) 172 { 173 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { 174 adev->last_mm_index = v; 175 } 176 177 if ((reg * 4) < adev->rio_mem_size) 178 iowrite32(v, adev->rio_mem + (reg * 4)); 179 else { 180 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); 181 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4)); 182 } 183 184 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { 185 udelay(500); 186 } 187 } 188 189 /** 190 * amdgpu_mm_rdoorbell - read a doorbell dword 191 * 192 * @adev: amdgpu_device pointer 193 * @index: doorbell index 194 * 195 * Returns the value in the doorbell aperture at the 196 * requested doorbell index (CIK). 197 */ 198 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) 199 { 200 if (index < adev->doorbell.num_doorbells) { 201 return readl(adev->doorbell.ptr + index); 202 } else { 203 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); 204 return 0; 205 } 206 } 207 208 /** 209 * amdgpu_mm_wdoorbell - write a doorbell dword 210 * 211 * @adev: amdgpu_device pointer 212 * @index: doorbell index 213 * @v: value to write 214 * 215 * Writes @v to the doorbell aperture at the 216 * requested doorbell index (CIK). 
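 *
 * Typical use is through the RDOORBELL32()/WDOORBELL32() macros from
 * amdgpu.h; an illustrative example is a ring backend kicking the hardware
 * after updating its write pointer:
 *
 *   WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));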
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks; each entry is a
 * {reg, and_mask, or_mask} triplet, so @array_size must be a multiple of 3.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
455 * 456 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up, 457 * takes doorbells required for its own rings and reports the setup to amdkfd. 458 * amdgpu reserved doorbells are at the start of the doorbell aperture. 459 */ 460 void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, 461 phys_addr_t *aperture_base, 462 size_t *aperture_size, 463 size_t *start_offset) 464 { 465 /* 466 * The first num_doorbells are used by amdgpu. 467 * amdkfd takes whatever's left in the aperture. 468 */ 469 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) { 470 *aperture_base = adev->doorbell.base; 471 *aperture_size = adev->doorbell.size; 472 *start_offset = adev->doorbell.num_doorbells * sizeof(u32); 473 } else { 474 *aperture_base = 0; 475 *aperture_size = 0; 476 *start_offset = 0; 477 } 478 } 479 480 /* 481 * amdgpu_wb_*() 482 * Writeback is the method by which the GPU updates special pages in memory 483 * with the status of certain GPU events (fences, ring pointers,etc.). 484 */ 485 486 /** 487 * amdgpu_wb_fini - Disable Writeback and free memory 488 * 489 * @adev: amdgpu_device pointer 490 * 491 * Disables Writeback and frees the Writeback memory (all asics). 492 * Used at driver shutdown. 493 */ 494 static void amdgpu_wb_fini(struct amdgpu_device *adev) 495 { 496 if (adev->wb.wb_obj) { 497 amdgpu_bo_free_kernel(&adev->wb.wb_obj, 498 &adev->wb.gpu_addr, 499 (void **)&adev->wb.wb); 500 adev->wb.wb_obj = NULL; 501 } 502 } 503 504 /** 505 * amdgpu_wb_init- Init Writeback driver info and allocate memory 506 * 507 * @adev: amdgpu_device pointer 508 * 509 * Initializes writeback and allocates writeback memory (all asics). 510 * Used at driver startup. 511 * Returns 0 on success or an -error on failure. 512 */ 513 static int amdgpu_wb_init(struct amdgpu_device *adev) 514 { 515 int r; 516 517 if (adev->wb.wb_obj == NULL) { 518 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */ 519 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8, 520 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 521 &adev->wb.wb_obj, &adev->wb.gpu_addr, 522 (void **)&adev->wb.wb); 523 if (r) { 524 dev_warn(adev->dev, "(%d) create WB bo failed\n", r); 525 return r; 526 } 527 528 adev->wb.num_wb = AMDGPU_MAX_WB; 529 memset(&adev->wb.used, 0, sizeof(adev->wb.used)); 530 531 /* clear wb memory */ 532 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t)); 533 } 534 535 return 0; 536 } 537 538 /** 539 * amdgpu_wb_get - Allocate a wb entry 540 * 541 * @adev: amdgpu_device pointer 542 * @wb: wb index 543 * 544 * Allocate a wb slot for use by the driver (all asics). 545 * Returns 0 on success or -EINVAL on failure. 
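 *
 * A rough usage sketch (illustrative only): grab a slot, hand its GPU
 * address to the hardware, read the CPU-visible copy, and release it:
 *
 *   u32 offs;
 *   if (!amdgpu_wb_get(adev, &offs)) {
 *           u64 gpu_addr = adev->wb.gpu_addr + (offs * 4); // for the GPU
 *           u32 val = le32_to_cpu(adev->wb.wb[offs]);      // CPU-side read
 *           amdgpu_wb_free(adev, offs);
 *   }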
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset * 8; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Tries to place VRAM at the base address provided as a parameter
 * (which is so far either the PCI aperture address or, for IGPs,
 * the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space, then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size since on some boards we need to program the MC
 * to cover the whole aperture even if the VRAM size is smaller than the
 * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 and the related
 * Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Tries to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, then the GTT size is
 * adjusted, so this function never fails.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
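 *
 * Illustrative example (numbers made up): with VRAM placed at
 * 0x0 - 0xFFFFFFFF (4 GB), the space before VRAM (size_bf) is 0, so a
 * requested 1 GB GTT ends up right after VRAM at 0x100000000 - 0x13FFFFFFF.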
 */
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->mc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		mc->gart_start = mc->vram_end + 1;
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed because a hw reset was performed.
 * Returns true if a post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* For FIJI: in the whole-GPU pass-through virtualization case,
		 * after a VM reboot some old SMC firmware still needs the driver
		 * to do a vPost, otherwise the GPU hangs.  SMC firmware versions
		 * above 22.15 don't have this flaw, so we force a vPost for SMC
		 * versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
875 */ 876 static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) 877 { 878 struct amdgpu_device *adev = info->dev->dev_private; 879 880 WREG32_IO(reg, val); 881 } 882 883 /** 884 * cail_ioreg_read - read IO register 885 * 886 * @info: atom card_info pointer 887 * @reg: IO register offset 888 * 889 * Provides an IO register accessor for the atom interpreter (r4xx+). 890 * Returns the value of the IO register. 891 */ 892 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) 893 { 894 struct amdgpu_device *adev = info->dev->dev_private; 895 uint32_t r; 896 897 r = RREG32_IO(reg); 898 return r; 899 } 900 901 static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev, 902 struct device_attribute *attr, 903 char *buf) 904 { 905 struct drm_device *ddev = dev_get_drvdata(dev); 906 struct amdgpu_device *adev = ddev->dev_private; 907 struct atom_context *ctx = adev->mode_info.atom_context; 908 909 return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version); 910 } 911 912 static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version, 913 NULL); 914 915 /** 916 * amdgpu_atombios_fini - free the driver info and callbacks for atombios 917 * 918 * @adev: amdgpu_device pointer 919 * 920 * Frees the driver info and register access callbacks for the ATOM 921 * interpreter (r4xx+). 922 * Called at driver shutdown. 923 */ 924 static void amdgpu_atombios_fini(struct amdgpu_device *adev) 925 { 926 if (adev->mode_info.atom_context) { 927 kfree(adev->mode_info.atom_context->scratch); 928 kfree(adev->mode_info.atom_context->iio); 929 } 930 kfree(adev->mode_info.atom_context); 931 adev->mode_info.atom_context = NULL; 932 kfree(adev->mode_info.atom_card_info); 933 adev->mode_info.atom_card_info = NULL; 934 device_remove_file(adev->dev, &dev_attr_vbios_version); 935 } 936 937 /** 938 * amdgpu_atombios_init - init the driver info and callbacks for atombios 939 * 940 * @adev: amdgpu_device pointer 941 * 942 * Initializes the driver info and register access callbacks for the 943 * ATOM interpreter (r4xx+). 944 * Returns 0 on sucess, -ENOMEM on failure. 945 * Called at driver startup. 946 */ 947 static int amdgpu_atombios_init(struct amdgpu_device *adev) 948 { 949 struct card_info *atom_card_info = 950 kzalloc(sizeof(struct card_info), GFP_KERNEL); 951 int ret; 952 953 if (!atom_card_info) 954 return -ENOMEM; 955 956 adev->mode_info.atom_card_info = atom_card_info; 957 atom_card_info->dev = adev->ddev; 958 atom_card_info->reg_read = cail_reg_read; 959 atom_card_info->reg_write = cail_reg_write; 960 /* needed for iio ops */ 961 if (adev->rio_mem) { 962 atom_card_info->ioreg_read = cail_ioreg_read; 963 atom_card_info->ioreg_write = cail_ioreg_write; 964 } else { 965 DRM_INFO("PCI I/O BAR is not found. 
Using MMIO to access ATOM BIOS\n"); 966 atom_card_info->ioreg_read = cail_reg_read; 967 atom_card_info->ioreg_write = cail_reg_write; 968 } 969 atom_card_info->mc_read = cail_mc_read; 970 atom_card_info->mc_write = cail_mc_write; 971 atom_card_info->pll_read = cail_pll_read; 972 atom_card_info->pll_write = cail_pll_write; 973 974 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios); 975 if (!adev->mode_info.atom_context) { 976 amdgpu_atombios_fini(adev); 977 return -ENOMEM; 978 } 979 980 mutex_init(&adev->mode_info.atom_context->mutex); 981 if (adev->is_atom_fw) { 982 amdgpu_atomfirmware_scratch_regs_init(adev); 983 amdgpu_atomfirmware_allocate_fb_scratch(adev); 984 } else { 985 amdgpu_atombios_scratch_regs_init(adev); 986 amdgpu_atombios_allocate_fb_scratch(adev); 987 } 988 989 ret = device_create_file(adev->dev, &dev_attr_vbios_version); 990 if (ret) { 991 DRM_ERROR("Failed to create device file for VBIOS version\n"); 992 return ret; 993 } 994 995 return 0; 996 } 997 998 /* if we get transitioned to only one device, take VGA back */ 999 /** 1000 * amdgpu_vga_set_decode - enable/disable vga decode 1001 * 1002 * @cookie: amdgpu_device pointer 1003 * @state: enable/disable vga decode 1004 * 1005 * Enable/disable vga decode (all asics). 1006 * Returns VGA resource flags. 1007 */ 1008 static unsigned int amdgpu_vga_set_decode(void *cookie, bool state) 1009 { 1010 struct amdgpu_device *adev = cookie; 1011 amdgpu_asic_set_vga_state(adev, state); 1012 if (state) 1013 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 1014 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 1015 else 1016 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 1017 } 1018 1019 static void amdgpu_check_block_size(struct amdgpu_device *adev) 1020 { 1021 /* defines number of bits in page table versus page directory, 1022 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the 1023 * page table and the remaining bits are in the page directory */ 1024 if (amdgpu_vm_block_size == -1) 1025 return; 1026 1027 if (amdgpu_vm_block_size < 9) { 1028 dev_warn(adev->dev, "VM page table size (%d) too small\n", 1029 amdgpu_vm_block_size); 1030 goto def_value; 1031 } 1032 1033 if (amdgpu_vm_block_size > 24 || 1034 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) { 1035 dev_warn(adev->dev, "VM page table size (%d) too large\n", 1036 amdgpu_vm_block_size); 1037 goto def_value; 1038 } 1039 1040 return; 1041 1042 def_value: 1043 amdgpu_vm_block_size = -1; 1044 } 1045 1046 static void amdgpu_check_vm_size(struct amdgpu_device *adev) 1047 { 1048 /* no need to check the default value */ 1049 if (amdgpu_vm_size == -1) 1050 return; 1051 1052 if (!is_power_of_2(amdgpu_vm_size)) { 1053 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n", 1054 amdgpu_vm_size); 1055 goto def_value; 1056 } 1057 1058 if (amdgpu_vm_size < 1) { 1059 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", 1060 amdgpu_vm_size); 1061 goto def_value; 1062 } 1063 1064 /* 1065 * Max GPUVM size for Cayman, SI, CI VI are 40 bits. 1066 */ 1067 if (amdgpu_vm_size > 1024) { 1068 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n", 1069 amdgpu_vm_size); 1070 goto def_value; 1071 } 1072 1073 return; 1074 1075 def_value: 1076 amdgpu_vm_size = -1; 1077 } 1078 1079 /** 1080 * amdgpu_check_arguments - validate module params 1081 * 1082 * @adev: amdgpu_device pointer 1083 * 1084 * Validates certain module parameters and updates 1085 * the associated values used by the driver (all asics). 
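 *
 * For example (illustrative), loading the module with amdgpu.sched_jobs=6
 * results in the value being rounded up to 8, the next power of two, by
 * the checks below.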
1086 */ 1087 static void amdgpu_check_arguments(struct amdgpu_device *adev) 1088 { 1089 if (amdgpu_sched_jobs < 4) { 1090 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", 1091 amdgpu_sched_jobs); 1092 amdgpu_sched_jobs = 4; 1093 } else if (!is_power_of_2(amdgpu_sched_jobs)){ 1094 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", 1095 amdgpu_sched_jobs); 1096 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); 1097 } 1098 1099 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { 1100 /* gart size must be greater or equal to 32M */ 1101 dev_warn(adev->dev, "gart size (%d) too small\n", 1102 amdgpu_gart_size); 1103 amdgpu_gart_size = -1; 1104 } 1105 1106 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { 1107 /* gtt size must be greater or equal to 32M */ 1108 dev_warn(adev->dev, "gtt size (%d) too small\n", 1109 amdgpu_gtt_size); 1110 amdgpu_gtt_size = -1; 1111 } 1112 1113 /* valid range is between 4 and 9 inclusive */ 1114 if (amdgpu_vm_fragment_size != -1 && 1115 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) { 1116 dev_warn(adev->dev, "valid range is between 4 and 9\n"); 1117 amdgpu_vm_fragment_size = -1; 1118 } 1119 1120 amdgpu_check_vm_size(adev); 1121 1122 amdgpu_check_block_size(adev); 1123 1124 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 || 1125 !is_power_of_2(amdgpu_vram_page_split))) { 1126 dev_warn(adev->dev, "invalid VRAM page split (%d)\n", 1127 amdgpu_vram_page_split); 1128 amdgpu_vram_page_split = 1024; 1129 } 1130 } 1131 1132 /** 1133 * amdgpu_switcheroo_set_state - set switcheroo state 1134 * 1135 * @pdev: pci dev pointer 1136 * @state: vga_switcheroo state 1137 * 1138 * Callback for the switcheroo driver. Suspends or resumes the 1139 * the asics before or after it is powered up using ACPI methods. 1140 */ 1141 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 1142 { 1143 struct drm_device *dev = pci_get_drvdata(pdev); 1144 1145 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF) 1146 return; 1147 1148 if (state == VGA_SWITCHEROO_ON) { 1149 pr_info("amdgpu: switched on\n"); 1150 /* don't suspend or resume card normally */ 1151 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1152 1153 amdgpu_device_resume(dev, true, true); 1154 1155 dev->switch_power_state = DRM_SWITCH_POWER_ON; 1156 drm_kms_helper_poll_enable(dev); 1157 } else { 1158 pr_info("amdgpu: switched off\n"); 1159 drm_kms_helper_poll_disable(dev); 1160 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1161 amdgpu_device_suspend(dev, true, true); 1162 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 1163 } 1164 } 1165 1166 /** 1167 * amdgpu_switcheroo_can_switch - see if switcheroo state can change 1168 * 1169 * @pdev: pci dev pointer 1170 * 1171 * Callback for the switcheroo driver. Check of the switcheroo 1172 * state can be changed. 1173 * Returns true if the state can be changed, false if not. 1174 */ 1175 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev) 1176 { 1177 struct drm_device *dev = pci_get_drvdata(pdev); 1178 1179 /* 1180 * FIXME: open_count is protected by drm_global_mutex but that would lead to 1181 * locking inversion with the driver load path. And the access here is 1182 * completely racy anyway. So don't bother with locking for now. 
1183 */ 1184 return dev->open_count == 0; 1185 } 1186 1187 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { 1188 .set_gpu_state = amdgpu_switcheroo_set_state, 1189 .reprobe = NULL, 1190 .can_switch = amdgpu_switcheroo_can_switch, 1191 }; 1192 1193 int amdgpu_set_clockgating_state(struct amdgpu_device *adev, 1194 enum amd_ip_block_type block_type, 1195 enum amd_clockgating_state state) 1196 { 1197 int i, r = 0; 1198 1199 for (i = 0; i < adev->num_ip_blocks; i++) { 1200 if (!adev->ip_blocks[i].status.valid) 1201 continue; 1202 if (adev->ip_blocks[i].version->type != block_type) 1203 continue; 1204 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state) 1205 continue; 1206 r = adev->ip_blocks[i].version->funcs->set_clockgating_state( 1207 (void *)adev, state); 1208 if (r) 1209 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n", 1210 adev->ip_blocks[i].version->funcs->name, r); 1211 } 1212 return r; 1213 } 1214 1215 int amdgpu_set_powergating_state(struct amdgpu_device *adev, 1216 enum amd_ip_block_type block_type, 1217 enum amd_powergating_state state) 1218 { 1219 int i, r = 0; 1220 1221 for (i = 0; i < adev->num_ip_blocks; i++) { 1222 if (!adev->ip_blocks[i].status.valid) 1223 continue; 1224 if (adev->ip_blocks[i].version->type != block_type) 1225 continue; 1226 if (!adev->ip_blocks[i].version->funcs->set_powergating_state) 1227 continue; 1228 r = adev->ip_blocks[i].version->funcs->set_powergating_state( 1229 (void *)adev, state); 1230 if (r) 1231 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n", 1232 adev->ip_blocks[i].version->funcs->name, r); 1233 } 1234 return r; 1235 } 1236 1237 void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags) 1238 { 1239 int i; 1240 1241 for (i = 0; i < adev->num_ip_blocks; i++) { 1242 if (!adev->ip_blocks[i].status.valid) 1243 continue; 1244 if (adev->ip_blocks[i].version->funcs->get_clockgating_state) 1245 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags); 1246 } 1247 } 1248 1249 int amdgpu_wait_for_idle(struct amdgpu_device *adev, 1250 enum amd_ip_block_type block_type) 1251 { 1252 int i, r; 1253 1254 for (i = 0; i < adev->num_ip_blocks; i++) { 1255 if (!adev->ip_blocks[i].status.valid) 1256 continue; 1257 if (adev->ip_blocks[i].version->type == block_type) { 1258 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev); 1259 if (r) 1260 return r; 1261 break; 1262 } 1263 } 1264 return 0; 1265 1266 } 1267 1268 bool amdgpu_is_idle(struct amdgpu_device *adev, 1269 enum amd_ip_block_type block_type) 1270 { 1271 int i; 1272 1273 for (i = 0; i < adev->num_ip_blocks; i++) { 1274 if (!adev->ip_blocks[i].status.valid) 1275 continue; 1276 if (adev->ip_blocks[i].version->type == block_type) 1277 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev); 1278 } 1279 return true; 1280 1281 } 1282 1283 struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev, 1284 enum amd_ip_block_type type) 1285 { 1286 int i; 1287 1288 for (i = 0; i < adev->num_ip_blocks; i++) 1289 if (adev->ip_blocks[i].version->type == type) 1290 return &adev->ip_blocks[i]; 1291 1292 return NULL; 1293 } 1294 1295 /** 1296 * amdgpu_ip_block_version_cmp 1297 * 1298 * @adev: amdgpu_device pointer 1299 * @type: enum amd_ip_block_type 1300 * @major: major version 1301 * @minor: minor version 1302 * 1303 * return 0 if equal or greater 1304 * return 1 if smaller or the ip_block doesn't exist 1305 */ 1306 int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, 1307 enum 
amd_ip_block_type type, 1308 u32 major, u32 minor) 1309 { 1310 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type); 1311 1312 if (ip_block && ((ip_block->version->major > major) || 1313 ((ip_block->version->major == major) && 1314 (ip_block->version->minor >= minor)))) 1315 return 0; 1316 1317 return 1; 1318 } 1319 1320 /** 1321 * amdgpu_ip_block_add 1322 * 1323 * @adev: amdgpu_device pointer 1324 * @ip_block_version: pointer to the IP to add 1325 * 1326 * Adds the IP block driver information to the collection of IPs 1327 * on the asic. 1328 */ 1329 int amdgpu_ip_block_add(struct amdgpu_device *adev, 1330 const struct amdgpu_ip_block_version *ip_block_version) 1331 { 1332 if (!ip_block_version) 1333 return -EINVAL; 1334 1335 DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks, 1336 ip_block_version->funcs->name); 1337 1338 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; 1339 1340 return 0; 1341 } 1342 1343 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) 1344 { 1345 adev->enable_virtual_display = false; 1346 1347 if (amdgpu_virtual_display) { 1348 struct drm_device *ddev = adev->ddev; 1349 const char *pci_address_name = pci_name(ddev->pdev); 1350 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname; 1351 1352 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); 1353 pciaddstr_tmp = pciaddstr; 1354 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) { 1355 pciaddname = strsep(&pciaddname_tmp, ","); 1356 if (!strcmp("all", pciaddname) 1357 || !strcmp(pci_address_name, pciaddname)) { 1358 long num_crtc; 1359 int res = -1; 1360 1361 adev->enable_virtual_display = true; 1362 1363 if (pciaddname_tmp) 1364 res = kstrtol(pciaddname_tmp, 10, 1365 &num_crtc); 1366 1367 if (!res) { 1368 if (num_crtc < 1) 1369 num_crtc = 1; 1370 if (num_crtc > 6) 1371 num_crtc = 6; 1372 adev->mode_info.num_crtc = num_crtc; 1373 } else { 1374 adev->mode_info.num_crtc = 1; 1375 } 1376 break; 1377 } 1378 } 1379 1380 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n", 1381 amdgpu_virtual_display, pci_address_name, 1382 adev->enable_virtual_display, adev->mode_info.num_crtc); 1383 1384 kfree(pciaddstr); 1385 } 1386 } 1387 1388 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) 1389 { 1390 const char *chip_name; 1391 char fw_name[30]; 1392 int err; 1393 const struct gpu_info_firmware_header_v1_0 *hdr; 1394 1395 adev->firmware.gpu_info_fw = NULL; 1396 1397 switch (adev->asic_type) { 1398 case CHIP_TOPAZ: 1399 case CHIP_TONGA: 1400 case CHIP_FIJI: 1401 case CHIP_POLARIS11: 1402 case CHIP_POLARIS10: 1403 case CHIP_POLARIS12: 1404 case CHIP_CARRIZO: 1405 case CHIP_STONEY: 1406 #ifdef CONFIG_DRM_AMDGPU_SI 1407 case CHIP_VERDE: 1408 case CHIP_TAHITI: 1409 case CHIP_PITCAIRN: 1410 case CHIP_OLAND: 1411 case CHIP_HAINAN: 1412 #endif 1413 #ifdef CONFIG_DRM_AMDGPU_CIK 1414 case CHIP_BONAIRE: 1415 case CHIP_HAWAII: 1416 case CHIP_KAVERI: 1417 case CHIP_KABINI: 1418 case CHIP_MULLINS: 1419 #endif 1420 default: 1421 return 0; 1422 case CHIP_VEGA10: 1423 chip_name = "vega10"; 1424 break; 1425 case CHIP_RAVEN: 1426 chip_name = "raven"; 1427 break; 1428 } 1429 1430 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); 1431 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev); 1432 if (err) { 1433 dev_err(adev->dev, 1434 "Failed to load gpu_info firmware \"%s\"\n", 1435 fw_name); 1436 goto out; 1437 } 1438 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw); 1439 if 
(err) { 1440 dev_err(adev->dev, 1441 "Failed to validate gpu_info firmware \"%s\"\n", 1442 fw_name); 1443 goto out; 1444 } 1445 1446 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data; 1447 amdgpu_ucode_print_gpu_info_hdr(&hdr->header); 1448 1449 switch (hdr->version_major) { 1450 case 1: 1451 { 1452 const struct gpu_info_firmware_v1_0 *gpu_info_fw = 1453 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + 1454 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 1455 1456 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); 1457 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); 1458 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); 1459 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se); 1460 adev->gfx.config.max_texture_channel_caches = 1461 le32_to_cpu(gpu_info_fw->gc_num_tccs); 1462 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs); 1463 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds); 1464 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth); 1465 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth); 1466 adev->gfx.config.double_offchip_lds_buf = 1467 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer); 1468 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size); 1469 adev->gfx.cu_info.max_waves_per_simd = 1470 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd); 1471 adev->gfx.cu_info.max_scratch_slots_per_cu = 1472 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu); 1473 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); 1474 break; 1475 } 1476 default: 1477 dev_err(adev->dev, 1478 "Unsupported gpu_info table %d\n", hdr->header.ucode_version); 1479 err = -EINVAL; 1480 goto out; 1481 } 1482 out: 1483 return err; 1484 } 1485 1486 static int amdgpu_early_init(struct amdgpu_device *adev) 1487 { 1488 int i, r; 1489 1490 amdgpu_device_enable_virtual_display(adev); 1491 1492 switch (adev->asic_type) { 1493 case CHIP_TOPAZ: 1494 case CHIP_TONGA: 1495 case CHIP_FIJI: 1496 case CHIP_POLARIS11: 1497 case CHIP_POLARIS10: 1498 case CHIP_POLARIS12: 1499 case CHIP_CARRIZO: 1500 case CHIP_STONEY: 1501 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) 1502 adev->family = AMDGPU_FAMILY_CZ; 1503 else 1504 adev->family = AMDGPU_FAMILY_VI; 1505 1506 r = vi_set_ip_blocks(adev); 1507 if (r) 1508 return r; 1509 break; 1510 #ifdef CONFIG_DRM_AMDGPU_SI 1511 case CHIP_VERDE: 1512 case CHIP_TAHITI: 1513 case CHIP_PITCAIRN: 1514 case CHIP_OLAND: 1515 case CHIP_HAINAN: 1516 adev->family = AMDGPU_FAMILY_SI; 1517 r = si_set_ip_blocks(adev); 1518 if (r) 1519 return r; 1520 break; 1521 #endif 1522 #ifdef CONFIG_DRM_AMDGPU_CIK 1523 case CHIP_BONAIRE: 1524 case CHIP_HAWAII: 1525 case CHIP_KAVERI: 1526 case CHIP_KABINI: 1527 case CHIP_MULLINS: 1528 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII)) 1529 adev->family = AMDGPU_FAMILY_CI; 1530 else 1531 adev->family = AMDGPU_FAMILY_KV; 1532 1533 r = cik_set_ip_blocks(adev); 1534 if (r) 1535 return r; 1536 break; 1537 #endif 1538 case CHIP_VEGA10: 1539 case CHIP_RAVEN: 1540 if (adev->asic_type == CHIP_RAVEN) 1541 adev->family = AMDGPU_FAMILY_RV; 1542 else 1543 adev->family = AMDGPU_FAMILY_AI; 1544 1545 r = soc15_set_ip_blocks(adev); 1546 if (r) 1547 return r; 1548 break; 1549 default: 1550 /* FIXME: not supported yet */ 
1551 return -EINVAL; 1552 } 1553 1554 r = amdgpu_device_parse_gpu_info_fw(adev); 1555 if (r) 1556 return r; 1557 1558 if (amdgpu_sriov_vf(adev)) { 1559 r = amdgpu_virt_request_full_gpu(adev, true); 1560 if (r) 1561 return r; 1562 } 1563 1564 for (i = 0; i < adev->num_ip_blocks; i++) { 1565 if ((amdgpu_ip_block_mask & (1 << i)) == 0) { 1566 DRM_ERROR("disabled ip block: %d <%s>\n", 1567 i, adev->ip_blocks[i].version->funcs->name); 1568 adev->ip_blocks[i].status.valid = false; 1569 } else { 1570 if (adev->ip_blocks[i].version->funcs->early_init) { 1571 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); 1572 if (r == -ENOENT) { 1573 adev->ip_blocks[i].status.valid = false; 1574 } else if (r) { 1575 DRM_ERROR("early_init of IP block <%s> failed %d\n", 1576 adev->ip_blocks[i].version->funcs->name, r); 1577 return r; 1578 } else { 1579 adev->ip_blocks[i].status.valid = true; 1580 } 1581 } else { 1582 adev->ip_blocks[i].status.valid = true; 1583 } 1584 } 1585 } 1586 1587 adev->cg_flags &= amdgpu_cg_mask; 1588 adev->pg_flags &= amdgpu_pg_mask; 1589 1590 return 0; 1591 } 1592 1593 static int amdgpu_init(struct amdgpu_device *adev) 1594 { 1595 int i, r; 1596 1597 for (i = 0; i < adev->num_ip_blocks; i++) { 1598 if (!adev->ip_blocks[i].status.valid) 1599 continue; 1600 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); 1601 if (r) { 1602 DRM_ERROR("sw_init of IP block <%s> failed %d\n", 1603 adev->ip_blocks[i].version->funcs->name, r); 1604 return r; 1605 } 1606 adev->ip_blocks[i].status.sw = true; 1607 /* need to do gmc hw init early so we can allocate gpu mem */ 1608 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 1609 r = amdgpu_vram_scratch_init(adev); 1610 if (r) { 1611 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); 1612 return r; 1613 } 1614 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); 1615 if (r) { 1616 DRM_ERROR("hw_init %d failed %d\n", i, r); 1617 return r; 1618 } 1619 r = amdgpu_wb_init(adev); 1620 if (r) { 1621 DRM_ERROR("amdgpu_wb_init failed %d\n", r); 1622 return r; 1623 } 1624 adev->ip_blocks[i].status.hw = true; 1625 1626 /* right after GMC hw init, we create CSA */ 1627 if (amdgpu_sriov_vf(adev)) { 1628 r = amdgpu_allocate_static_csa(adev); 1629 if (r) { 1630 DRM_ERROR("allocate CSA failed %d\n", r); 1631 return r; 1632 } 1633 } 1634 } 1635 } 1636 1637 for (i = 0; i < adev->num_ip_blocks; i++) { 1638 if (!adev->ip_blocks[i].status.sw) 1639 continue; 1640 /* gmc hw init is done early */ 1641 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) 1642 continue; 1643 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); 1644 if (r) { 1645 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 1646 adev->ip_blocks[i].version->funcs->name, r); 1647 return r; 1648 } 1649 adev->ip_blocks[i].status.hw = true; 1650 } 1651 1652 return 0; 1653 } 1654 1655 static void amdgpu_fill_reset_magic(struct amdgpu_device *adev) 1656 { 1657 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); 1658 } 1659 1660 static bool amdgpu_check_vram_lost(struct amdgpu_device *adev) 1661 { 1662 return !!memcmp(adev->gart.ptr, adev->reset_magic, 1663 AMDGPU_RESET_MAGIC_NUM); 1664 } 1665 1666 static int amdgpu_late_set_cg_state(struct amdgpu_device *adev) 1667 { 1668 int i = 0, r; 1669 1670 for (i = 0; i < adev->num_ip_blocks; i++) { 1671 if (!adev->ip_blocks[i].status.valid) 1672 continue; 1673 /* skip CG for VCE/UVD, it's handled specially */ 1674 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 1675 
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { 1676 /* enable clockgating to save power */ 1677 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1678 AMD_CG_STATE_GATE); 1679 if (r) { 1680 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", 1681 adev->ip_blocks[i].version->funcs->name, r); 1682 return r; 1683 } 1684 } 1685 } 1686 return 0; 1687 } 1688 1689 static int amdgpu_late_init(struct amdgpu_device *adev) 1690 { 1691 int i = 0, r; 1692 1693 for (i = 0; i < adev->num_ip_blocks; i++) { 1694 if (!adev->ip_blocks[i].status.valid) 1695 continue; 1696 if (adev->ip_blocks[i].version->funcs->late_init) { 1697 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); 1698 if (r) { 1699 DRM_ERROR("late_init of IP block <%s> failed %d\n", 1700 adev->ip_blocks[i].version->funcs->name, r); 1701 return r; 1702 } 1703 adev->ip_blocks[i].status.late_initialized = true; 1704 } 1705 } 1706 1707 mod_delayed_work(system_wq, &adev->late_init_work, 1708 msecs_to_jiffies(AMDGPU_RESUME_MS)); 1709 1710 amdgpu_fill_reset_magic(adev); 1711 1712 return 0; 1713 } 1714 1715 static int amdgpu_fini(struct amdgpu_device *adev) 1716 { 1717 int i, r; 1718 1719 /* need to disable SMC first */ 1720 for (i = 0; i < adev->num_ip_blocks; i++) { 1721 if (!adev->ip_blocks[i].status.hw) 1722 continue; 1723 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 1724 /* ungate blocks before hw fini so that we can shutdown the blocks safely */ 1725 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1726 AMD_CG_STATE_UNGATE); 1727 if (r) { 1728 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", 1729 adev->ip_blocks[i].version->funcs->name, r); 1730 return r; 1731 } 1732 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 1733 /* XXX handle errors */ 1734 if (r) { 1735 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 1736 adev->ip_blocks[i].version->funcs->name, r); 1737 } 1738 adev->ip_blocks[i].status.hw = false; 1739 break; 1740 } 1741 } 1742 1743 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1744 if (!adev->ip_blocks[i].status.hw) 1745 continue; 1746 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 1747 amdgpu_wb_fini(adev); 1748 amdgpu_vram_scratch_fini(adev); 1749 } 1750 1751 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 1752 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { 1753 /* ungate blocks before hw fini so that we can shutdown the blocks safely */ 1754 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1755 AMD_CG_STATE_UNGATE); 1756 if (r) { 1757 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", 1758 adev->ip_blocks[i].version->funcs->name, r); 1759 return r; 1760 } 1761 } 1762 1763 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 1764 /* XXX handle errors */ 1765 if (r) { 1766 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 1767 adev->ip_blocks[i].version->funcs->name, r); 1768 } 1769 1770 adev->ip_blocks[i].status.hw = false; 1771 } 1772 1773 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1774 if (!adev->ip_blocks[i].status.sw) 1775 continue; 1776 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); 1777 /* XXX handle errors */ 1778 if (r) { 1779 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", 1780 adev->ip_blocks[i].version->funcs->name, r); 1781 } 1782 adev->ip_blocks[i].status.sw = false; 1783 adev->ip_blocks[i].status.valid = false; 1784 } 1785 1786 for (i = 
adev->num_ip_blocks - 1; i >= 0; i--) { 1787 if (!adev->ip_blocks[i].status.late_initialized) 1788 continue; 1789 if (adev->ip_blocks[i].version->funcs->late_fini) 1790 adev->ip_blocks[i].version->funcs->late_fini((void *)adev); 1791 adev->ip_blocks[i].status.late_initialized = false; 1792 } 1793 1794 if (amdgpu_sriov_vf(adev)) 1795 amdgpu_virt_release_full_gpu(adev, false); 1796 1797 return 0; 1798 } 1799 1800 static void amdgpu_late_init_func_handler(struct work_struct *work) 1801 { 1802 struct amdgpu_device *adev = 1803 container_of(work, struct amdgpu_device, late_init_work.work); 1804 amdgpu_late_set_cg_state(adev); 1805 } 1806 1807 int amdgpu_suspend(struct amdgpu_device *adev) 1808 { 1809 int i, r; 1810 1811 if (amdgpu_sriov_vf(adev)) 1812 amdgpu_virt_request_full_gpu(adev, false); 1813 1814 /* ungate SMC block first */ 1815 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC, 1816 AMD_CG_STATE_UNGATE); 1817 if (r) { 1818 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r); 1819 } 1820 1821 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1822 if (!adev->ip_blocks[i].status.valid) 1823 continue; 1824 /* ungate blocks so that suspend can properly shut them down */ 1825 if (i != AMD_IP_BLOCK_TYPE_SMC) { 1826 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1827 AMD_CG_STATE_UNGATE); 1828 if (r) { 1829 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", 1830 adev->ip_blocks[i].version->funcs->name, r); 1831 } 1832 } 1833 /* XXX handle errors */ 1834 r = adev->ip_blocks[i].version->funcs->suspend(adev); 1835 /* XXX handle errors */ 1836 if (r) { 1837 DRM_ERROR("suspend of IP block <%s> failed %d\n", 1838 adev->ip_blocks[i].version->funcs->name, r); 1839 } 1840 } 1841 1842 if (amdgpu_sriov_vf(adev)) 1843 amdgpu_virt_release_full_gpu(adev, false); 1844 1845 return 0; 1846 } 1847 1848 static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev) 1849 { 1850 int i, r; 1851 1852 static enum amd_ip_block_type ip_order[] = { 1853 AMD_IP_BLOCK_TYPE_GMC, 1854 AMD_IP_BLOCK_TYPE_COMMON, 1855 AMD_IP_BLOCK_TYPE_IH, 1856 }; 1857 1858 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 1859 int j; 1860 struct amdgpu_ip_block *block; 1861 1862 for (j = 0; j < adev->num_ip_blocks; j++) { 1863 block = &adev->ip_blocks[j]; 1864 1865 if (block->version->type != ip_order[i] || 1866 !block->status.valid) 1867 continue; 1868 1869 r = block->version->funcs->hw_init(adev); 1870 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed"); 1871 } 1872 } 1873 1874 return 0; 1875 } 1876 1877 static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev) 1878 { 1879 int i, r; 1880 1881 static enum amd_ip_block_type ip_order[] = { 1882 AMD_IP_BLOCK_TYPE_SMC, 1883 AMD_IP_BLOCK_TYPE_DCE, 1884 AMD_IP_BLOCK_TYPE_GFX, 1885 AMD_IP_BLOCK_TYPE_SDMA, 1886 AMD_IP_BLOCK_TYPE_UVD, 1887 AMD_IP_BLOCK_TYPE_VCE 1888 }; 1889 1890 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 1891 int j; 1892 struct amdgpu_ip_block *block; 1893 1894 for (j = 0; j < adev->num_ip_blocks; j++) { 1895 block = &adev->ip_blocks[j]; 1896 1897 if (block->version->type != ip_order[i] || 1898 !block->status.valid) 1899 continue; 1900 1901 r = block->version->funcs->hw_init(adev); 1902 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed"); 1903 } 1904 } 1905 1906 return 0; 1907 } 1908 1909 static int amdgpu_resume_phase1(struct amdgpu_device *adev) 1910 { 1911 int i, r; 1912 1913 for (i = 0; i < adev->num_ip_blocks; i++) { 1914 if 
(!adev->ip_blocks[i].status.valid) 1915 continue; 1916 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 1917 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 1918 adev->ip_blocks[i].version->type == 1919 AMD_IP_BLOCK_TYPE_IH) { 1920 r = adev->ip_blocks[i].version->funcs->resume(adev); 1921 if (r) { 1922 DRM_ERROR("resume of IP block <%s> failed %d\n", 1923 adev->ip_blocks[i].version->funcs->name, r); 1924 return r; 1925 } 1926 } 1927 } 1928 1929 return 0; 1930 } 1931 1932 static int amdgpu_resume_phase2(struct amdgpu_device *adev) 1933 { 1934 int i, r; 1935 1936 for (i = 0; i < adev->num_ip_blocks; i++) { 1937 if (!adev->ip_blocks[i].status.valid) 1938 continue; 1939 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 1940 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 1941 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ) 1942 continue; 1943 r = adev->ip_blocks[i].version->funcs->resume(adev); 1944 if (r) { 1945 DRM_ERROR("resume of IP block <%s> failed %d\n", 1946 adev->ip_blocks[i].version->funcs->name, r); 1947 return r; 1948 } 1949 } 1950 1951 return 0; 1952 } 1953 1954 static int amdgpu_resume(struct amdgpu_device *adev) 1955 { 1956 int r; 1957 1958 r = amdgpu_resume_phase1(adev); 1959 if (r) 1960 return r; 1961 r = amdgpu_resume_phase2(adev); 1962 1963 return r; 1964 } 1965 1966 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) 1967 { 1968 if (adev->is_atom_fw) { 1969 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev)) 1970 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 1971 } else { 1972 if (amdgpu_atombios_has_gpu_virtualization_table(adev)) 1973 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 1974 } 1975 } 1976 1977 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) 1978 { 1979 switch (asic_type) { 1980 #if defined(CONFIG_DRM_AMD_DC) 1981 case CHIP_BONAIRE: 1982 case CHIP_HAWAII: 1983 case CHIP_CARRIZO: 1984 case CHIP_STONEY: 1985 case CHIP_POLARIS11: 1986 case CHIP_POLARIS10: 1987 case CHIP_POLARIS12: 1988 case CHIP_TONGA: 1989 case CHIP_FIJI: 1990 case CHIP_VEGA10: 1991 #if defined(CONFIG_DRM_AMD_DC_PRE_VEGA) 1992 return amdgpu_dc != 0; 1993 #else 1994 return amdgpu_dc > 0; 1995 #endif 1996 #endif 1997 default: 1998 return false; 1999 } 2000 } 2001 2002 /** 2003 * amdgpu_device_has_dc_support - check if dc is supported 2004 * 2005 * @adev: amdgpu_device_pointer 2006 * 2007 * Returns true for supported, false for not supported 2008 */ 2009 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) 2010 { 2011 if (amdgpu_sriov_vf(adev)) 2012 return false; 2013 2014 return amdgpu_device_asic_has_dc_support(adev->asic_type); 2015 } 2016 2017 /** 2018 * amdgpu_device_init - initialize the driver 2019 * 2020 * @adev: amdgpu_device pointer 2021 * @pdev: drm dev pointer 2022 * @pdev: pci dev pointer 2023 * @flags: driver flags 2024 * 2025 * Initializes the driver info and hw (all asics). 2026 * Returns 0 for success or an error on failure. 2027 * Called at driver startup. 
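 * The sequence below maps the MMIO and doorbell apertures, fetches and
 * (when required) posts the vBIOS, initializes clocks, the fence driver
 * and every IP block, and finally registers the debugfs interfaces; most
 * failure paths unwind through the "failed" label.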
2028 */ 2029 int amdgpu_device_init(struct amdgpu_device *adev, 2030 struct drm_device *ddev, 2031 struct pci_dev *pdev, 2032 uint32_t flags) 2033 { 2034 int r, i; 2035 bool runtime = false; 2036 u32 max_MBps; 2037 2038 adev->shutdown = false; 2039 adev->dev = &pdev->dev; 2040 adev->ddev = ddev; 2041 adev->pdev = pdev; 2042 adev->flags = flags; 2043 adev->asic_type = flags & AMD_ASIC_MASK; 2044 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; 2045 adev->mc.gart_size = 512 * 1024 * 1024; 2046 adev->accel_working = false; 2047 adev->num_rings = 0; 2048 adev->mman.buffer_funcs = NULL; 2049 adev->mman.buffer_funcs_ring = NULL; 2050 adev->vm_manager.vm_pte_funcs = NULL; 2051 adev->vm_manager.vm_pte_num_rings = 0; 2052 adev->gart.gart_funcs = NULL; 2053 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 2054 2055 adev->smc_rreg = &amdgpu_invalid_rreg; 2056 adev->smc_wreg = &amdgpu_invalid_wreg; 2057 adev->pcie_rreg = &amdgpu_invalid_rreg; 2058 adev->pcie_wreg = &amdgpu_invalid_wreg; 2059 adev->pciep_rreg = &amdgpu_invalid_rreg; 2060 adev->pciep_wreg = &amdgpu_invalid_wreg; 2061 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; 2062 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; 2063 adev->didt_rreg = &amdgpu_invalid_rreg; 2064 adev->didt_wreg = &amdgpu_invalid_wreg; 2065 adev->gc_cac_rreg = &amdgpu_invalid_rreg; 2066 adev->gc_cac_wreg = &amdgpu_invalid_wreg; 2067 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; 2068 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; 2069 2070 2071 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 2072 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 2073 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 2074 2075 /* mutex initialization are all done here so we 2076 * can recall function without having locking issues */ 2077 atomic_set(&adev->irq.ih.lock, 0); 2078 mutex_init(&adev->firmware.mutex); 2079 mutex_init(&adev->pm.mutex); 2080 mutex_init(&adev->gfx.gpu_clock_mutex); 2081 mutex_init(&adev->srbm_mutex); 2082 mutex_init(&adev->grbm_idx_mutex); 2083 mutex_init(&adev->mn_lock); 2084 hash_init(adev->mn_hash); 2085 2086 amdgpu_check_arguments(adev); 2087 2088 spin_lock_init(&adev->mmio_idx_lock); 2089 spin_lock_init(&adev->smc_idx_lock); 2090 spin_lock_init(&adev->pcie_idx_lock); 2091 spin_lock_init(&adev->uvd_ctx_idx_lock); 2092 spin_lock_init(&adev->didt_idx_lock); 2093 spin_lock_init(&adev->gc_cac_idx_lock); 2094 spin_lock_init(&adev->se_cac_idx_lock); 2095 spin_lock_init(&adev->audio_endpt_idx_lock); 2096 spin_lock_init(&adev->mm_stats.lock); 2097 2098 INIT_LIST_HEAD(&adev->shadow_list); 2099 mutex_init(&adev->shadow_list_lock); 2100 2101 INIT_LIST_HEAD(&adev->gtt_list); 2102 spin_lock_init(&adev->gtt_list_lock); 2103 2104 INIT_LIST_HEAD(&adev->ring_lru_list); 2105 spin_lock_init(&adev->ring_lru_list_lock); 2106 2107 INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler); 2108 2109 /* Registers mapping */ 2110 /* TODO: block userspace mapping of io register */ 2111 if (adev->asic_type >= CHIP_BONAIRE) { 2112 adev->rmmio_base = pci_resource_start(adev->pdev, 5); 2113 adev->rmmio_size = pci_resource_len(adev->pdev, 5); 2114 } else { 2115 adev->rmmio_base = pci_resource_start(adev->pdev, 2); 2116 adev->rmmio_size = pci_resource_len(adev->pdev, 2); 2117 } 2118 2119 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); 2120 if (adev->rmmio == NULL) { 2121 return -ENOMEM; 2122 } 2123 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); 2124 
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); 2125 2126 /* doorbell bar mapping */ 2127 amdgpu_doorbell_init(adev); 2128 2129 /* io port mapping */ 2130 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 2131 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) { 2132 adev->rio_mem_size = pci_resource_len(adev->pdev, i); 2133 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size); 2134 break; 2135 } 2136 } 2137 if (adev->rio_mem == NULL) 2138 DRM_INFO("PCI I/O BAR is not found.\n"); 2139 2140 /* early init functions */ 2141 r = amdgpu_early_init(adev); 2142 if (r) 2143 return r; 2144 2145 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ 2146 /* this will fail for cards that aren't VGA class devices, just 2147 * ignore it */ 2148 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode); 2149 2150 if (amdgpu_runtime_pm == 1) 2151 runtime = true; 2152 if (amdgpu_device_is_px(ddev)) 2153 runtime = true; 2154 if (!pci_is_thunderbolt_attached(adev->pdev)) 2155 vga_switcheroo_register_client(adev->pdev, 2156 &amdgpu_switcheroo_ops, runtime); 2157 if (runtime) 2158 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); 2159 2160 /* Read BIOS */ 2161 if (!amdgpu_get_bios(adev)) { 2162 r = -EINVAL; 2163 goto failed; 2164 } 2165 2166 r = amdgpu_atombios_init(adev); 2167 if (r) { 2168 dev_err(adev->dev, "amdgpu_atombios_init failed\n"); 2169 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); 2170 goto failed; 2171 } 2172 2173 /* detect if we are with an SRIOV vbios */ 2174 amdgpu_device_detect_sriov_bios(adev); 2175 2176 /* Post card if necessary */ 2177 if (amdgpu_vpost_needed(adev)) { 2178 if (!adev->bios) { 2179 dev_err(adev->dev, "no vBIOS found\n"); 2180 amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0); 2181 r = -EINVAL; 2182 goto failed; 2183 } 2184 DRM_INFO("GPU posting now...\n"); 2185 r = amdgpu_atom_asic_init(adev->mode_info.atom_context); 2186 if (r) { 2187 dev_err(adev->dev, "gpu post error!\n"); 2188 amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0); 2189 goto failed; 2190 } 2191 } else { 2192 DRM_INFO("GPU post is not needed\n"); 2193 } 2194 2195 if (adev->is_atom_fw) { 2196 /* Initialize clocks */ 2197 r = amdgpu_atomfirmware_get_clock_info(adev); 2198 if (r) { 2199 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); 2200 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 2201 goto failed; 2202 } 2203 } else { 2204 /* Initialize clocks */ 2205 r = amdgpu_atombios_get_clock_info(adev); 2206 if (r) { 2207 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); 2208 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 2209 goto failed; 2210 } 2211 /* init i2c buses */ 2212 if (!amdgpu_device_has_dc_support(adev)) 2213 amdgpu_atombios_i2c_init(adev); 2214 } 2215 2216 /* Fence driver */ 2217 r = amdgpu_fence_driver_init(adev); 2218 if (r) { 2219 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); 2220 amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); 2221 goto failed; 2222 } 2223 2224 /* init the mode config */ 2225 drm_mode_config_init(adev->ddev); 2226 2227 r = amdgpu_init(adev); 2228 if (r) { 2229 dev_err(adev->dev, "amdgpu_init failed\n"); 2230 amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); 2231 amdgpu_fini(adev); 2232 goto failed; 2233 } 2234 2235 adev->accel_working = true; 2236 2237 amdgpu_vm_check_compute_bug(adev); 2238 2239 /* Initialize the buffer migration limit. 
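	 * If the amdgpu_moverate module parameter is non-negative it is used
	 * directly, otherwise a conservative 8 MB/s default applies; the value
	 * is stored as log2(MB/s) so later accounting can use shifts instead
	 * of divisions.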
*/ 2240 if (amdgpu_moverate >= 0) 2241 max_MBps = amdgpu_moverate; 2242 else 2243 max_MBps = 8; /* Allow 8 MB/s. */ 2244 /* Get a log2 for easy divisions. */ 2245 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); 2246 2247 r = amdgpu_ib_pool_init(adev); 2248 if (r) { 2249 dev_err(adev->dev, "IB initialization failed (%d).\n", r); 2250 amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); 2251 goto failed; 2252 } 2253 2254 r = amdgpu_ib_ring_tests(adev); 2255 if (r) 2256 DRM_ERROR("ib ring test failed (%d).\n", r); 2257 2258 amdgpu_fbdev_init(adev); 2259 2260 r = amdgpu_gem_debugfs_init(adev); 2261 if (r) 2262 DRM_ERROR("registering gem debugfs failed (%d).\n", r); 2263 2264 r = amdgpu_debugfs_regs_init(adev); 2265 if (r) 2266 DRM_ERROR("registering register debugfs failed (%d).\n", r); 2267 2268 r = amdgpu_debugfs_test_ib_ring_init(adev); 2269 if (r) 2270 DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r); 2271 2272 r = amdgpu_debugfs_firmware_init(adev); 2273 if (r) 2274 DRM_ERROR("registering firmware debugfs failed (%d).\n", r); 2275 2276 r = amdgpu_debugfs_vbios_dump_init(adev); 2277 if (r) 2278 DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r); 2279 2280 if ((amdgpu_testing & 1)) { 2281 if (adev->accel_working) 2282 amdgpu_test_moves(adev); 2283 else 2284 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n"); 2285 } 2286 if (amdgpu_benchmarking) { 2287 if (adev->accel_working) 2288 amdgpu_benchmark(adev, amdgpu_benchmarking); 2289 else 2290 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n"); 2291 } 2292 2293 /* enable clockgating, etc. after ib tests, etc. since some blocks require 2294 * explicit gating rather than handling it automatically. 2295 */ 2296 r = amdgpu_late_init(adev); 2297 if (r) { 2298 dev_err(adev->dev, "amdgpu_late_init failed\n"); 2299 amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); 2300 goto failed; 2301 } 2302 2303 return 0; 2304 2305 failed: 2306 amdgpu_vf_error_trans_all(adev); 2307 if (runtime) 2308 vga_switcheroo_fini_domain_pm_ops(adev->dev); 2309 return r; 2310 } 2311 2312 /** 2313 * amdgpu_device_fini - tear down the driver 2314 * 2315 * @adev: amdgpu_device pointer 2316 * 2317 * Tear down the driver info (all asics). 2318 * Called at driver shutdown. 
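 * This is the inverse of amdgpu_device_init: it evicts VRAM, tears down the
 * IB pool, fence driver and IP blocks, frees the vBIOS copy, and unmaps the
 * MMIO, I/O and doorbell apertures.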
2319 */ 2320 void amdgpu_device_fini(struct amdgpu_device *adev) 2321 { 2322 int r; 2323 2324 DRM_INFO("amdgpu: finishing device.\n"); 2325 adev->shutdown = true; 2326 if (adev->mode_info.mode_config_initialized) 2327 drm_crtc_force_disable_all(adev->ddev); 2328 /* evict vram memory */ 2329 amdgpu_bo_evict_vram(adev); 2330 amdgpu_ib_pool_fini(adev); 2331 amdgpu_fence_driver_fini(adev); 2332 amdgpu_fbdev_fini(adev); 2333 r = amdgpu_fini(adev); 2334 if (adev->firmware.gpu_info_fw) { 2335 release_firmware(adev->firmware.gpu_info_fw); 2336 adev->firmware.gpu_info_fw = NULL; 2337 } 2338 adev->accel_working = false; 2339 cancel_delayed_work_sync(&adev->late_init_work); 2340 /* free i2c buses */ 2341 if (!amdgpu_device_has_dc_support(adev)) 2342 amdgpu_i2c_fini(adev); 2343 amdgpu_atombios_fini(adev); 2344 kfree(adev->bios); 2345 adev->bios = NULL; 2346 if (!pci_is_thunderbolt_attached(adev->pdev)) 2347 vga_switcheroo_unregister_client(adev->pdev); 2348 if (adev->flags & AMD_IS_PX) 2349 vga_switcheroo_fini_domain_pm_ops(adev->dev); 2350 vga_client_register(adev->pdev, NULL, NULL, NULL); 2351 if (adev->rio_mem) 2352 pci_iounmap(adev->pdev, adev->rio_mem); 2353 adev->rio_mem = NULL; 2354 iounmap(adev->rmmio); 2355 adev->rmmio = NULL; 2356 amdgpu_doorbell_fini(adev); 2357 amdgpu_debugfs_regs_cleanup(adev); 2358 } 2359 2360 2361 /* 2362 * Suspend & resume. 2363 */ 2364 /** 2365 * amdgpu_device_suspend - initiate device suspend 2366 * 2367 * @pdev: drm dev pointer 2368 * @state: suspend state 2369 * 2370 * Puts the hw in the suspend state (all asics). 2371 * Returns 0 for success or an error on failure. 2372 * Called at driver suspend. 2373 */ 2374 int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) 2375 { 2376 struct amdgpu_device *adev; 2377 struct drm_crtc *crtc; 2378 struct drm_connector *connector; 2379 int r; 2380 2381 if (dev == NULL || dev->dev_private == NULL) { 2382 return -ENODEV; 2383 } 2384 2385 adev = dev->dev_private; 2386 2387 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 2388 return 0; 2389 2390 drm_kms_helper_poll_disable(dev); 2391 2392 if (!amdgpu_device_has_dc_support(adev)) { 2393 /* turn off display hw */ 2394 drm_modeset_lock_all(dev); 2395 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2396 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 2397 } 2398 drm_modeset_unlock_all(dev); 2399 } 2400 2401 amdgpu_amdkfd_suspend(adev); 2402 2403 /* unpin the front buffers and cursors */ 2404 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2405 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2406 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb); 2407 struct amdgpu_bo *robj; 2408 2409 if (amdgpu_crtc->cursor_bo) { 2410 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2411 r = amdgpu_bo_reserve(aobj, true); 2412 if (r == 0) { 2413 amdgpu_bo_unpin(aobj); 2414 amdgpu_bo_unreserve(aobj); 2415 } 2416 } 2417 2418 if (rfb == NULL || rfb->obj == NULL) { 2419 continue; 2420 } 2421 robj = gem_to_amdgpu_bo(rfb->obj); 2422 /* don't unpin kernel fb objects */ 2423 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { 2424 r = amdgpu_bo_reserve(robj, true); 2425 if (r == 0) { 2426 amdgpu_bo_unpin(robj); 2427 amdgpu_bo_unreserve(robj); 2428 } 2429 } 2430 } 2431 /* evict vram memory */ 2432 amdgpu_bo_evict_vram(adev); 2433 2434 amdgpu_fence_driver_suspend(adev); 2435 2436 r = amdgpu_suspend(adev); 2437 2438 /* evict remaining vram memory 2439 * This second call to evict vram is to evict 
the gart page table 2440 * using the CPU. 2441 */ 2442 amdgpu_bo_evict_vram(adev); 2443 2444 amdgpu_atombios_scratch_regs_save(adev); 2445 pci_save_state(dev->pdev); 2446 if (suspend) { 2447 /* Shut down the device */ 2448 pci_disable_device(dev->pdev); 2449 pci_set_power_state(dev->pdev, PCI_D3hot); 2450 } else { 2451 r = amdgpu_asic_reset(adev); 2452 if (r) 2453 DRM_ERROR("amdgpu asic reset failed\n"); 2454 } 2455 2456 if (fbcon) { 2457 console_lock(); 2458 amdgpu_fbdev_set_suspend(adev, 1); 2459 console_unlock(); 2460 } 2461 return 0; 2462 } 2463 2464 /** 2465 * amdgpu_device_resume - initiate device resume 2466 * 2467 * @pdev: drm dev pointer 2468 * 2469 * Bring the hw back to operating state (all asics). 2470 * Returns 0 for success or an error on failure. 2471 * Called at driver resume. 2472 */ 2473 int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) 2474 { 2475 struct drm_connector *connector; 2476 struct amdgpu_device *adev = dev->dev_private; 2477 struct drm_crtc *crtc; 2478 int r = 0; 2479 2480 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 2481 return 0; 2482 2483 if (fbcon) 2484 console_lock(); 2485 2486 if (resume) { 2487 pci_set_power_state(dev->pdev, PCI_D0); 2488 pci_restore_state(dev->pdev); 2489 r = pci_enable_device(dev->pdev); 2490 if (r) 2491 goto unlock; 2492 } 2493 amdgpu_atombios_scratch_regs_restore(adev); 2494 2495 /* post card */ 2496 if (amdgpu_need_post(adev)) { 2497 r = amdgpu_atom_asic_init(adev->mode_info.atom_context); 2498 if (r) 2499 DRM_ERROR("amdgpu asic init failed\n"); 2500 } 2501 2502 r = amdgpu_resume(adev); 2503 if (r) { 2504 DRM_ERROR("amdgpu_resume failed (%d).\n", r); 2505 goto unlock; 2506 } 2507 amdgpu_fence_driver_resume(adev); 2508 2509 if (resume) { 2510 r = amdgpu_ib_ring_tests(adev); 2511 if (r) 2512 DRM_ERROR("ib ring test failed (%d).\n", r); 2513 } 2514 2515 r = amdgpu_late_init(adev); 2516 if (r) 2517 goto unlock; 2518 2519 /* pin cursors */ 2520 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2521 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2522 2523 if (amdgpu_crtc->cursor_bo) { 2524 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2525 r = amdgpu_bo_reserve(aobj, true); 2526 if (r == 0) { 2527 r = amdgpu_bo_pin(aobj, 2528 AMDGPU_GEM_DOMAIN_VRAM, 2529 &amdgpu_crtc->cursor_addr); 2530 if (r != 0) 2531 DRM_ERROR("Failed to pin cursor BO (%d)\n", r); 2532 amdgpu_bo_unreserve(aobj); 2533 } 2534 } 2535 } 2536 r = amdgpu_amdkfd_resume(adev); 2537 if (r) 2538 return r; 2539 2540 /* blat the mode back in */ 2541 if (fbcon) { 2542 if (!amdgpu_device_has_dc_support(adev)) { 2543 /* pre DCE11 */ 2544 drm_helper_resume_force_mode(dev); 2545 2546 /* turn on display hw */ 2547 drm_modeset_lock_all(dev); 2548 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2549 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 2550 } 2551 drm_modeset_unlock_all(dev); 2552 } else { 2553 /* 2554 * There is no equivalent atomic helper to turn on 2555 * display, so we defined our own function for this, 2556 * once suspend resume is supported by the atomic 2557 * framework this will be reworked 2558 */ 2559 amdgpu_dm_display_resume(adev); 2560 } 2561 } 2562 2563 drm_kms_helper_poll_enable(dev); 2564 2565 /* 2566 * Most of the connector probing functions try to acquire runtime pm 2567 * refs to ensure that the GPU is powered on when connector polling is 2568 * performed. 
Since we're calling this from a runtime PM callback, 2569 * trying to acquire rpm refs will cause us to deadlock. 2570 * 2571 * Since we're guaranteed to be holding the rpm lock, it's safe to 2572 * temporarily disable the rpm helpers so this doesn't deadlock us. 2573 */ 2574 #ifdef CONFIG_PM 2575 dev->dev->power.disable_depth++; 2576 #endif 2577 if (!amdgpu_device_has_dc_support(adev)) 2578 drm_helper_hpd_irq_event(dev); 2579 else 2580 drm_kms_helper_hotplug_event(dev); 2581 #ifdef CONFIG_PM 2582 dev->dev->power.disable_depth--; 2583 #endif 2584 2585 if (fbcon) 2586 amdgpu_fbdev_set_suspend(adev, 0); 2587 2588 unlock: 2589 if (fbcon) 2590 console_unlock(); 2591 2592 return r; 2593 } 2594 2595 static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) 2596 { 2597 int i; 2598 bool asic_hang = false; 2599 2600 for (i = 0; i < adev->num_ip_blocks; i++) { 2601 if (!adev->ip_blocks[i].status.valid) 2602 continue; 2603 if (adev->ip_blocks[i].version->funcs->check_soft_reset) 2604 adev->ip_blocks[i].status.hang = 2605 adev->ip_blocks[i].version->funcs->check_soft_reset(adev); 2606 if (adev->ip_blocks[i].status.hang) { 2607 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); 2608 asic_hang = true; 2609 } 2610 } 2611 return asic_hang; 2612 } 2613 2614 static int amdgpu_pre_soft_reset(struct amdgpu_device *adev) 2615 { 2616 int i, r = 0; 2617 2618 for (i = 0; i < adev->num_ip_blocks; i++) { 2619 if (!adev->ip_blocks[i].status.valid) 2620 continue; 2621 if (adev->ip_blocks[i].status.hang && 2622 adev->ip_blocks[i].version->funcs->pre_soft_reset) { 2623 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); 2624 if (r) 2625 return r; 2626 } 2627 } 2628 2629 return 0; 2630 } 2631 2632 static bool amdgpu_need_full_reset(struct amdgpu_device *adev) 2633 { 2634 int i; 2635 2636 for (i = 0; i < adev->num_ip_blocks; i++) { 2637 if (!adev->ip_blocks[i].status.valid) 2638 continue; 2639 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || 2640 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || 2641 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || 2642 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || 2643 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 2644 if (adev->ip_blocks[i].status.hang) { 2645 DRM_INFO("Some block need full reset!\n"); 2646 return true; 2647 } 2648 } 2649 } 2650 return false; 2651 } 2652 2653 static int amdgpu_soft_reset(struct amdgpu_device *adev) 2654 { 2655 int i, r = 0; 2656 2657 for (i = 0; i < adev->num_ip_blocks; i++) { 2658 if (!adev->ip_blocks[i].status.valid) 2659 continue; 2660 if (adev->ip_blocks[i].status.hang && 2661 adev->ip_blocks[i].version->funcs->soft_reset) { 2662 r = adev->ip_blocks[i].version->funcs->soft_reset(adev); 2663 if (r) 2664 return r; 2665 } 2666 } 2667 2668 return 0; 2669 } 2670 2671 static int amdgpu_post_soft_reset(struct amdgpu_device *adev) 2672 { 2673 int i, r = 0; 2674 2675 for (i = 0; i < adev->num_ip_blocks; i++) { 2676 if (!adev->ip_blocks[i].status.valid) 2677 continue; 2678 if (adev->ip_blocks[i].status.hang && 2679 adev->ip_blocks[i].version->funcs->post_soft_reset) 2680 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); 2681 if (r) 2682 return r; 2683 } 2684 2685 return 0; 2686 } 2687 2688 bool amdgpu_need_backup(struct amdgpu_device *adev) 2689 { 2690 if (adev->flags & AMD_IS_APU) 2691 return false; 2692 2693 return amdgpu_lockup_timeout > 0 ? 
true : false; 2694 } 2695 2696 static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, 2697 struct amdgpu_ring *ring, 2698 struct amdgpu_bo *bo, 2699 struct dma_fence **fence) 2700 { 2701 uint32_t domain; 2702 int r; 2703 2704 if (!bo->shadow) 2705 return 0; 2706 2707 r = amdgpu_bo_reserve(bo, true); 2708 if (r) 2709 return r; 2710 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); 2711 /* if bo has been evicted, then no need to recover */ 2712 if (domain == AMDGPU_GEM_DOMAIN_VRAM) { 2713 r = amdgpu_bo_validate(bo->shadow); 2714 if (r) { 2715 DRM_ERROR("bo validate failed!\n"); 2716 goto err; 2717 } 2718 2719 r = amdgpu_bo_restore_from_shadow(adev, ring, bo, 2720 NULL, fence, true); 2721 if (r) { 2722 DRM_ERROR("recover page table failed!\n"); 2723 goto err; 2724 } 2725 } 2726 err: 2727 amdgpu_bo_unreserve(bo); 2728 return r; 2729 } 2730 2731 /** 2732 * amdgpu_sriov_gpu_reset - reset the asic 2733 * 2734 * @adev: amdgpu device pointer 2735 * @job: which job trigger hang 2736 * 2737 * Attempt the reset the GPU if it has hung (all asics). 2738 * for SRIOV case. 2739 * Returns 0 for success or an error on failure. 2740 */ 2741 int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job) 2742 { 2743 int i, j, r = 0; 2744 int resched; 2745 struct amdgpu_bo *bo, *tmp; 2746 struct amdgpu_ring *ring; 2747 struct dma_fence *fence = NULL, *next = NULL; 2748 2749 mutex_lock(&adev->virt.lock_reset); 2750 atomic_inc(&adev->gpu_reset_counter); 2751 adev->in_sriov_reset = true; 2752 2753 /* block TTM */ 2754 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); 2755 2756 /* we start from the ring trigger GPU hang */ 2757 j = job ? job->ring->idx : 0; 2758 2759 /* block scheduler */ 2760 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) { 2761 ring = adev->rings[i % AMDGPU_MAX_RINGS]; 2762 if (!ring || !ring->sched.thread) 2763 continue; 2764 2765 kthread_park(ring->sched.thread); 2766 2767 if (job && j != i) 2768 continue; 2769 2770 /* here give the last chance to check if job removed from mirror-list 2771 * since we already pay some time on kthread_park */ 2772 if (job && list_empty(&job->base.node)) { 2773 kthread_unpark(ring->sched.thread); 2774 goto give_up_reset; 2775 } 2776 2777 if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit)) 2778 amd_sched_job_kickout(&job->base); 2779 2780 /* only do job_reset on the hang ring if @job not NULL */ 2781 amd_sched_hw_job_reset(&ring->sched); 2782 2783 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ 2784 amdgpu_fence_driver_force_completion_ring(ring); 2785 } 2786 2787 /* request to take full control of GPU before re-initialization */ 2788 if (job) 2789 amdgpu_virt_reset_gpu(adev); 2790 else 2791 amdgpu_virt_request_full_gpu(adev, true); 2792 2793 2794 /* Resume IP prior to SMC */ 2795 amdgpu_sriov_reinit_early(adev); 2796 2797 /* we need recover gart prior to run SMC/CP/SDMA resume */ 2798 amdgpu_ttm_recover_gart(adev); 2799 2800 /* now we are okay to resume SMC/CP/SDMA */ 2801 amdgpu_sriov_reinit_late(adev); 2802 2803 amdgpu_irq_gpu_reset_resume_helper(adev); 2804 2805 if (amdgpu_ib_ring_tests(adev)) 2806 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r); 2807 2808 /* release full control of GPU after ib test */ 2809 amdgpu_virt_release_full_gpu(adev, true); 2810 2811 DRM_INFO("recover vram bo from shadow\n"); 2812 2813 ring = adev->mman.buffer_funcs_ring; 2814 mutex_lock(&adev->shadow_list_lock); 2815 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, 
shadow_list) {
2816 		next = NULL;
2817 		amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2818 		if (fence) {
2819 			r = dma_fence_wait(fence, false);
2820 			if (r) {
2821 				WARN(r, "recovery from shadow isn't completed\n");
2822 				break;
2823 			}
2824 		}
2825 
2826 		dma_fence_put(fence);
2827 		fence = next;
2828 	}
2829 	mutex_unlock(&adev->shadow_list_lock);
2830 
2831 	if (fence) {
2832 		r = dma_fence_wait(fence, false);
2833 		if (r)
2834 			WARN(r, "recovery from shadow isn't completed\n");
2835 	}
2836 	dma_fence_put(fence);
2837 
2838 	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2839 		ring = adev->rings[i % AMDGPU_MAX_RINGS];
2840 		if (!ring || !ring->sched.thread)
2841 			continue;
2842 
2843 		if (job && j != i) {
2844 			kthread_unpark(ring->sched.thread);
2845 			continue;
2846 		}
2847 
2848 		amd_sched_job_recovery(&ring->sched);
2849 		kthread_unpark(ring->sched.thread);
2850 	}
2851 
2852 	drm_helper_resume_force_mode(adev->ddev);
2853 give_up_reset:
2854 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2855 	if (r) {
2856 		/* bad news, how to tell it to userspace ? */
2857 		dev_info(adev->dev, "GPU reset failed\n");
2858 	} else {
2859 		dev_info(adev->dev, "GPU reset succeeded!\n");
2860 	}
2861 
2862 	adev->in_sriov_reset = false;
2863 	mutex_unlock(&adev->virt.lock_reset);
2864 	return r;
2865 }
2866 
2867 /**
2868  * amdgpu_gpu_reset - reset the asic
2869  *
2870  * @adev: amdgpu device pointer
2871  *
2872  * Attempt to reset the GPU if it has hung (all asics).
2873  * Returns 0 for success or an error on failure.
2874  */
2875 int amdgpu_gpu_reset(struct amdgpu_device *adev)
2876 {
2877 	struct drm_atomic_state *state = NULL;
2878 	int i, r;
2879 	int resched;
2880 	bool need_full_reset, vram_lost = false;
2881 
2882 	if (!amdgpu_check_soft_reset(adev)) {
2883 		DRM_INFO("No hardware hang detected.
Did some blocks stall?\n"); 2884 return 0; 2885 } 2886 2887 atomic_inc(&adev->gpu_reset_counter); 2888 2889 /* block TTM */ 2890 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); 2891 /* store modesetting */ 2892 if (amdgpu_device_has_dc_support(adev)) 2893 state = drm_atomic_helper_suspend(adev->ddev); 2894 2895 /* block scheduler */ 2896 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 2897 struct amdgpu_ring *ring = adev->rings[i]; 2898 2899 if (!ring || !ring->sched.thread) 2900 continue; 2901 kthread_park(ring->sched.thread); 2902 amd_sched_hw_job_reset(&ring->sched); 2903 } 2904 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ 2905 amdgpu_fence_driver_force_completion(adev); 2906 2907 need_full_reset = amdgpu_need_full_reset(adev); 2908 2909 if (!need_full_reset) { 2910 amdgpu_pre_soft_reset(adev); 2911 r = amdgpu_soft_reset(adev); 2912 amdgpu_post_soft_reset(adev); 2913 if (r || amdgpu_check_soft_reset(adev)) { 2914 DRM_INFO("soft reset failed, will fallback to full reset!\n"); 2915 need_full_reset = true; 2916 } 2917 } 2918 2919 if (need_full_reset) { 2920 r = amdgpu_suspend(adev); 2921 2922 retry: 2923 amdgpu_atombios_scratch_regs_save(adev); 2924 r = amdgpu_asic_reset(adev); 2925 amdgpu_atombios_scratch_regs_restore(adev); 2926 /* post card */ 2927 amdgpu_atom_asic_init(adev->mode_info.atom_context); 2928 2929 if (!r) { 2930 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); 2931 r = amdgpu_resume_phase1(adev); 2932 if (r) 2933 goto out; 2934 vram_lost = amdgpu_check_vram_lost(adev); 2935 if (vram_lost) { 2936 DRM_ERROR("VRAM is lost!\n"); 2937 atomic_inc(&adev->vram_lost_counter); 2938 } 2939 r = amdgpu_ttm_recover_gart(adev); 2940 if (r) 2941 goto out; 2942 r = amdgpu_resume_phase2(adev); 2943 if (r) 2944 goto out; 2945 if (vram_lost) 2946 amdgpu_fill_reset_magic(adev); 2947 } 2948 } 2949 out: 2950 if (!r) { 2951 amdgpu_irq_gpu_reset_resume_helper(adev); 2952 r = amdgpu_ib_ring_tests(adev); 2953 if (r) { 2954 dev_err(adev->dev, "ib ring test failed (%d).\n", r); 2955 r = amdgpu_suspend(adev); 2956 need_full_reset = true; 2957 goto retry; 2958 } 2959 /** 2960 * recovery vm page tables, since we cannot depend on VRAM is 2961 * consistent after gpu full reset. 
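		 * Each buffer on the shadow list below is restored from its
		 * shadow copy, waiting on the previous restore fence along the
		 * way so a failed wait stops the loop early.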
2962 */ 2963 if (need_full_reset && amdgpu_need_backup(adev)) { 2964 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; 2965 struct amdgpu_bo *bo, *tmp; 2966 struct dma_fence *fence = NULL, *next = NULL; 2967 2968 DRM_INFO("recover vram bo from shadow\n"); 2969 mutex_lock(&adev->shadow_list_lock); 2970 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { 2971 next = NULL; 2972 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); 2973 if (fence) { 2974 r = dma_fence_wait(fence, false); 2975 if (r) { 2976 WARN(r, "recovery from shadow isn't completed\n"); 2977 break; 2978 } 2979 } 2980 2981 dma_fence_put(fence); 2982 fence = next; 2983 } 2984 mutex_unlock(&adev->shadow_list_lock); 2985 if (fence) { 2986 r = dma_fence_wait(fence, false); 2987 if (r) 2988 WARN(r, "recovery from shadow isn't completed\n"); 2989 } 2990 dma_fence_put(fence); 2991 } 2992 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 2993 struct amdgpu_ring *ring = adev->rings[i]; 2994 2995 if (!ring || !ring->sched.thread) 2996 continue; 2997 2998 amd_sched_job_recovery(&ring->sched); 2999 kthread_unpark(ring->sched.thread); 3000 } 3001 } else { 3002 dev_err(adev->dev, "asic resume failed (%d).\n", r); 3003 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r); 3004 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 3005 if (adev->rings[i] && adev->rings[i]->sched.thread) { 3006 kthread_unpark(adev->rings[i]->sched.thread); 3007 } 3008 } 3009 } 3010 3011 if (amdgpu_device_has_dc_support(adev)) { 3012 r = drm_atomic_helper_resume(adev->ddev, state); 3013 amdgpu_dm_display_resume(adev); 3014 } else 3015 drm_helper_resume_force_mode(adev->ddev); 3016 3017 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); 3018 if (r) { 3019 /* bad news, how to tell it to userspace ? */ 3020 dev_info(adev->dev, "GPU reset failed\n"); 3021 amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); 3022 } 3023 else { 3024 dev_info(adev->dev, "GPU reset successed!\n"); 3025 } 3026 3027 amdgpu_vf_error_trans_all(adev); 3028 return r; 3029 } 3030 3031 void amdgpu_get_pcie_info(struct amdgpu_device *adev) 3032 { 3033 u32 mask; 3034 int ret; 3035 3036 if (amdgpu_pcie_gen_cap) 3037 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; 3038 3039 if (amdgpu_pcie_lane_cap) 3040 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; 3041 3042 /* covers APUs as well */ 3043 if (pci_is_root_bus(adev->pdev->bus)) { 3044 if (adev->pm.pcie_gen_mask == 0) 3045 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; 3046 if (adev->pm.pcie_mlw_mask == 0) 3047 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; 3048 return; 3049 } 3050 3051 if (adev->pm.pcie_gen_mask == 0) { 3052 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 3053 if (!ret) { 3054 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 3055 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 3056 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 3057 3058 if (mask & DRM_PCIE_SPEED_25) 3059 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; 3060 if (mask & DRM_PCIE_SPEED_50) 3061 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; 3062 if (mask & DRM_PCIE_SPEED_80) 3063 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; 3064 } else { 3065 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; 3066 } 3067 } 3068 if (adev->pm.pcie_mlw_mask == 0) { 3069 ret = drm_pcie_get_max_link_width(adev->ddev, &mask); 3070 if (!ret) { 3071 switch (mask) { 3072 case 32: 3073 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | 3074 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 3075 
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 3076 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 3077 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3078 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3079 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3080 break; 3081 case 16: 3082 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 3083 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 3084 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 3085 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3086 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3087 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3088 break; 3089 case 12: 3090 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 3091 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 3092 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3093 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3094 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3095 break; 3096 case 8: 3097 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 3098 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3099 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3100 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3101 break; 3102 case 4: 3103 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3104 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3105 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3106 break; 3107 case 2: 3108 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3109 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3110 break; 3111 case 1: 3112 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; 3113 break; 3114 default: 3115 break; 3116 } 3117 } else { 3118 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; 3119 } 3120 } 3121 } 3122 3123 /* 3124 * Debugfs 3125 */ 3126 int amdgpu_debugfs_add_files(struct amdgpu_device *adev, 3127 const struct drm_info_list *files, 3128 unsigned nfiles) 3129 { 3130 unsigned i; 3131 3132 for (i = 0; i < adev->debugfs_count; i++) { 3133 if (adev->debugfs[i].files == files) { 3134 /* Already registered */ 3135 return 0; 3136 } 3137 } 3138 3139 i = adev->debugfs_count + 1; 3140 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) { 3141 DRM_ERROR("Reached maximum number of debugfs components.\n"); 3142 DRM_ERROR("Report so we increase " 3143 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n"); 3144 return -EINVAL; 3145 } 3146 adev->debugfs[adev->debugfs_count].files = files; 3147 adev->debugfs[adev->debugfs_count].num_files = nfiles; 3148 adev->debugfs_count = i; 3149 #if defined(CONFIG_DEBUG_FS) 3150 drm_debugfs_create_files(files, nfiles, 3151 adev->ddev->primary->debugfs_root, 3152 adev->ddev->primary); 3153 #endif 3154 return 0; 3155 } 3156 3157 #if defined(CONFIG_DEBUG_FS) 3158 3159 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, 3160 size_t size, loff_t *pos) 3161 { 3162 struct amdgpu_device *adev = file_inode(f)->i_private; 3163 ssize_t result = 0; 3164 int r; 3165 bool pm_pg_lock, use_bank; 3166 unsigned instance_bank, sh_bank, se_bank; 3167 3168 if (size & 0x3 || *pos & 0x3) 3169 return -EINVAL; 3170 3171 /* are we reading registers for which a PG lock is necessary? 
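	 * The file offset doubles as a command word: bit 23 requests the PM
	 * power-gating lock, bit 62 selects banked access, and bits 24-33,
	 * 34-43 and 44-53 carry the SE, SH and instance bank (0x3FF meaning
	 * "all"); the low 22 bits are the register byte offset itself.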
*/ 3172 pm_pg_lock = (*pos >> 23) & 1; 3173 3174 if (*pos & (1ULL << 62)) { 3175 se_bank = (*pos >> 24) & 0x3FF; 3176 sh_bank = (*pos >> 34) & 0x3FF; 3177 instance_bank = (*pos >> 44) & 0x3FF; 3178 3179 if (se_bank == 0x3FF) 3180 se_bank = 0xFFFFFFFF; 3181 if (sh_bank == 0x3FF) 3182 sh_bank = 0xFFFFFFFF; 3183 if (instance_bank == 0x3FF) 3184 instance_bank = 0xFFFFFFFF; 3185 use_bank = 1; 3186 } else { 3187 use_bank = 0; 3188 } 3189 3190 *pos &= (1UL << 22) - 1; 3191 3192 if (use_bank) { 3193 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || 3194 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) 3195 return -EINVAL; 3196 mutex_lock(&adev->grbm_idx_mutex); 3197 amdgpu_gfx_select_se_sh(adev, se_bank, 3198 sh_bank, instance_bank); 3199 } 3200 3201 if (pm_pg_lock) 3202 mutex_lock(&adev->pm.mutex); 3203 3204 while (size) { 3205 uint32_t value; 3206 3207 if (*pos > adev->rmmio_size) 3208 goto end; 3209 3210 value = RREG32(*pos >> 2); 3211 r = put_user(value, (uint32_t *)buf); 3212 if (r) { 3213 result = r; 3214 goto end; 3215 } 3216 3217 result += 4; 3218 buf += 4; 3219 *pos += 4; 3220 size -= 4; 3221 } 3222 3223 end: 3224 if (use_bank) { 3225 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 3226 mutex_unlock(&adev->grbm_idx_mutex); 3227 } 3228 3229 if (pm_pg_lock) 3230 mutex_unlock(&adev->pm.mutex); 3231 3232 return result; 3233 } 3234 3235 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, 3236 size_t size, loff_t *pos) 3237 { 3238 struct amdgpu_device *adev = file_inode(f)->i_private; 3239 ssize_t result = 0; 3240 int r; 3241 bool pm_pg_lock, use_bank; 3242 unsigned instance_bank, sh_bank, se_bank; 3243 3244 if (size & 0x3 || *pos & 0x3) 3245 return -EINVAL; 3246 3247 /* are we reading registers for which a PG lock is necessary? 
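	 * (The write path decodes the same offset encoding as
	 * amdgpu_debugfs_regs_read() above.)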
*/ 3248 pm_pg_lock = (*pos >> 23) & 1; 3249 3250 if (*pos & (1ULL << 62)) { 3251 se_bank = (*pos >> 24) & 0x3FF; 3252 sh_bank = (*pos >> 34) & 0x3FF; 3253 instance_bank = (*pos >> 44) & 0x3FF; 3254 3255 if (se_bank == 0x3FF) 3256 se_bank = 0xFFFFFFFF; 3257 if (sh_bank == 0x3FF) 3258 sh_bank = 0xFFFFFFFF; 3259 if (instance_bank == 0x3FF) 3260 instance_bank = 0xFFFFFFFF; 3261 use_bank = 1; 3262 } else { 3263 use_bank = 0; 3264 } 3265 3266 *pos &= (1UL << 22) - 1; 3267 3268 if (use_bank) { 3269 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || 3270 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) 3271 return -EINVAL; 3272 mutex_lock(&adev->grbm_idx_mutex); 3273 amdgpu_gfx_select_se_sh(adev, se_bank, 3274 sh_bank, instance_bank); 3275 } 3276 3277 if (pm_pg_lock) 3278 mutex_lock(&adev->pm.mutex); 3279 3280 while (size) { 3281 uint32_t value; 3282 3283 if (*pos > adev->rmmio_size) 3284 return result; 3285 3286 r = get_user(value, (uint32_t *)buf); 3287 if (r) 3288 return r; 3289 3290 WREG32(*pos >> 2, value); 3291 3292 result += 4; 3293 buf += 4; 3294 *pos += 4; 3295 size -= 4; 3296 } 3297 3298 if (use_bank) { 3299 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 3300 mutex_unlock(&adev->grbm_idx_mutex); 3301 } 3302 3303 if (pm_pg_lock) 3304 mutex_unlock(&adev->pm.mutex); 3305 3306 return result; 3307 } 3308 3309 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, 3310 size_t size, loff_t *pos) 3311 { 3312 struct amdgpu_device *adev = file_inode(f)->i_private; 3313 ssize_t result = 0; 3314 int r; 3315 3316 if (size & 0x3 || *pos & 0x3) 3317 return -EINVAL; 3318 3319 while (size) { 3320 uint32_t value; 3321 3322 value = RREG32_PCIE(*pos >> 2); 3323 r = put_user(value, (uint32_t *)buf); 3324 if (r) 3325 return r; 3326 3327 result += 4; 3328 buf += 4; 3329 *pos += 4; 3330 size -= 4; 3331 } 3332 3333 return result; 3334 } 3335 3336 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf, 3337 size_t size, loff_t *pos) 3338 { 3339 struct amdgpu_device *adev = file_inode(f)->i_private; 3340 ssize_t result = 0; 3341 int r; 3342 3343 if (size & 0x3 || *pos & 0x3) 3344 return -EINVAL; 3345 3346 while (size) { 3347 uint32_t value; 3348 3349 r = get_user(value, (uint32_t *)buf); 3350 if (r) 3351 return r; 3352 3353 WREG32_PCIE(*pos >> 2, value); 3354 3355 result += 4; 3356 buf += 4; 3357 *pos += 4; 3358 size -= 4; 3359 } 3360 3361 return result; 3362 } 3363 3364 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, 3365 size_t size, loff_t *pos) 3366 { 3367 struct amdgpu_device *adev = file_inode(f)->i_private; 3368 ssize_t result = 0; 3369 int r; 3370 3371 if (size & 0x3 || *pos & 0x3) 3372 return -EINVAL; 3373 3374 while (size) { 3375 uint32_t value; 3376 3377 value = RREG32_DIDT(*pos >> 2); 3378 r = put_user(value, (uint32_t *)buf); 3379 if (r) 3380 return r; 3381 3382 result += 4; 3383 buf += 4; 3384 *pos += 4; 3385 size -= 4; 3386 } 3387 3388 return result; 3389 } 3390 3391 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf, 3392 size_t size, loff_t *pos) 3393 { 3394 struct amdgpu_device *adev = file_inode(f)->i_private; 3395 ssize_t result = 0; 3396 int r; 3397 3398 if (size & 0x3 || *pos & 0x3) 3399 return -EINVAL; 3400 3401 while (size) { 3402 uint32_t value; 3403 3404 r = get_user(value, (uint32_t *)buf); 3405 if (r) 3406 return r; 3407 3408 WREG32_DIDT(*pos >> 2, value); 3409 3410 result += 4; 3411 buf += 4; 3412 
*pos += 4; 3413 size -= 4; 3414 } 3415 3416 return result; 3417 } 3418 3419 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, 3420 size_t size, loff_t *pos) 3421 { 3422 struct amdgpu_device *adev = file_inode(f)->i_private; 3423 ssize_t result = 0; 3424 int r; 3425 3426 if (size & 0x3 || *pos & 0x3) 3427 return -EINVAL; 3428 3429 while (size) { 3430 uint32_t value; 3431 3432 value = RREG32_SMC(*pos); 3433 r = put_user(value, (uint32_t *)buf); 3434 if (r) 3435 return r; 3436 3437 result += 4; 3438 buf += 4; 3439 *pos += 4; 3440 size -= 4; 3441 } 3442 3443 return result; 3444 } 3445 3446 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf, 3447 size_t size, loff_t *pos) 3448 { 3449 struct amdgpu_device *adev = file_inode(f)->i_private; 3450 ssize_t result = 0; 3451 int r; 3452 3453 if (size & 0x3 || *pos & 0x3) 3454 return -EINVAL; 3455 3456 while (size) { 3457 uint32_t value; 3458 3459 r = get_user(value, (uint32_t *)buf); 3460 if (r) 3461 return r; 3462 3463 WREG32_SMC(*pos, value); 3464 3465 result += 4; 3466 buf += 4; 3467 *pos += 4; 3468 size -= 4; 3469 } 3470 3471 return result; 3472 } 3473 3474 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, 3475 size_t size, loff_t *pos) 3476 { 3477 struct amdgpu_device *adev = file_inode(f)->i_private; 3478 ssize_t result = 0; 3479 int r; 3480 uint32_t *config, no_regs = 0; 3481 3482 if (size & 0x3 || *pos & 0x3) 3483 return -EINVAL; 3484 3485 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL); 3486 if (!config) 3487 return -ENOMEM; 3488 3489 /* version, increment each time something is added */ 3490 config[no_regs++] = 3; 3491 config[no_regs++] = adev->gfx.config.max_shader_engines; 3492 config[no_regs++] = adev->gfx.config.max_tile_pipes; 3493 config[no_regs++] = adev->gfx.config.max_cu_per_sh; 3494 config[no_regs++] = adev->gfx.config.max_sh_per_se; 3495 config[no_regs++] = adev->gfx.config.max_backends_per_se; 3496 config[no_regs++] = adev->gfx.config.max_texture_channel_caches; 3497 config[no_regs++] = adev->gfx.config.max_gprs; 3498 config[no_regs++] = adev->gfx.config.max_gs_threads; 3499 config[no_regs++] = adev->gfx.config.max_hw_contexts; 3500 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend; 3501 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend; 3502 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size; 3503 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size; 3504 config[no_regs++] = adev->gfx.config.num_tile_pipes; 3505 config[no_regs++] = adev->gfx.config.backend_enable_mask; 3506 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes; 3507 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb; 3508 config[no_regs++] = adev->gfx.config.shader_engine_tile_size; 3509 config[no_regs++] = adev->gfx.config.num_gpus; 3510 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size; 3511 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg; 3512 config[no_regs++] = adev->gfx.config.gb_addr_config; 3513 config[no_regs++] = adev->gfx.config.num_rbs; 3514 3515 /* rev==1 */ 3516 config[no_regs++] = adev->rev_id; 3517 config[no_regs++] = adev->pg_flags; 3518 config[no_regs++] = adev->cg_flags; 3519 3520 /* rev==2 */ 3521 config[no_regs++] = adev->family; 3522 config[no_regs++] = adev->external_rev_id; 3523 3524 /* rev==3 */ 3525 config[no_regs++] = adev->pdev->device; 3526 config[no_regs++] = adev->pdev->revision; 3527 config[no_regs++] = adev->pdev->subsystem_device; 3528 config[no_regs++] = 
adev->pdev->subsystem_vendor; 3529 3530 while (size && (*pos < no_regs * 4)) { 3531 uint32_t value; 3532 3533 value = config[*pos >> 2]; 3534 r = put_user(value, (uint32_t *)buf); 3535 if (r) { 3536 kfree(config); 3537 return r; 3538 } 3539 3540 result += 4; 3541 buf += 4; 3542 *pos += 4; 3543 size -= 4; 3544 } 3545 3546 kfree(config); 3547 return result; 3548 } 3549 3550 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, 3551 size_t size, loff_t *pos) 3552 { 3553 struct amdgpu_device *adev = file_inode(f)->i_private; 3554 int idx, x, outsize, r, valuesize; 3555 uint32_t values[16]; 3556 3557 if (size & 3 || *pos & 0x3) 3558 return -EINVAL; 3559 3560 if (amdgpu_dpm == 0) 3561 return -EINVAL; 3562 3563 /* convert offset to sensor number */ 3564 idx = *pos >> 2; 3565 3566 valuesize = sizeof(values); 3567 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) 3568 r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); 3569 else 3570 return -EINVAL; 3571 3572 if (size > valuesize) 3573 return -EINVAL; 3574 3575 outsize = 0; 3576 x = 0; 3577 if (!r) { 3578 while (size) { 3579 r = put_user(values[x++], (int32_t *)buf); 3580 buf += 4; 3581 size -= 4; 3582 outsize += 4; 3583 } 3584 } 3585 3586 return !r ? outsize : r; 3587 } 3588 3589 static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, 3590 size_t size, loff_t *pos) 3591 { 3592 struct amdgpu_device *adev = f->f_inode->i_private; 3593 int r, x; 3594 ssize_t result=0; 3595 uint32_t offset, se, sh, cu, wave, simd, data[32]; 3596 3597 if (size & 3 || *pos & 3) 3598 return -EINVAL; 3599 3600 /* decode offset */ 3601 offset = (*pos & 0x7F); 3602 se = ((*pos >> 7) & 0xFF); 3603 sh = ((*pos >> 15) & 0xFF); 3604 cu = ((*pos >> 23) & 0xFF); 3605 wave = ((*pos >> 31) & 0xFF); 3606 simd = ((*pos >> 37) & 0xFF); 3607 3608 /* switch to the specific se/sh/cu */ 3609 mutex_lock(&adev->grbm_idx_mutex); 3610 amdgpu_gfx_select_se_sh(adev, se, sh, cu); 3611 3612 x = 0; 3613 if (adev->gfx.funcs->read_wave_data) 3614 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); 3615 3616 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); 3617 mutex_unlock(&adev->grbm_idx_mutex); 3618 3619 if (!x) 3620 return -EINVAL; 3621 3622 while (size && (offset < x * 4)) { 3623 uint32_t value; 3624 3625 value = data[offset >> 2]; 3626 r = put_user(value, (uint32_t *)buf); 3627 if (r) 3628 return r; 3629 3630 result += 4; 3631 buf += 4; 3632 offset += 4; 3633 size -= 4; 3634 } 3635 3636 return result; 3637 } 3638 3639 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, 3640 size_t size, loff_t *pos) 3641 { 3642 struct amdgpu_device *adev = f->f_inode->i_private; 3643 int r; 3644 ssize_t result = 0; 3645 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data; 3646 3647 if (size & 3 || *pos & 3) 3648 return -EINVAL; 3649 3650 /* decode offset */ 3651 offset = (*pos & 0xFFF); /* in dwords */ 3652 se = ((*pos >> 12) & 0xFF); 3653 sh = ((*pos >> 20) & 0xFF); 3654 cu = ((*pos >> 28) & 0xFF); 3655 wave = ((*pos >> 36) & 0xFF); 3656 simd = ((*pos >> 44) & 0xFF); 3657 thread = ((*pos >> 52) & 0xFF); 3658 bank = ((*pos >> 60) & 1); 3659 3660 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); 3661 if (!data) 3662 return -ENOMEM; 3663 3664 /* switch to the specific se/sh/cu */ 3665 mutex_lock(&adev->grbm_idx_mutex); 3666 amdgpu_gfx_select_se_sh(adev, se, sh, cu); 3667 3668 if (bank == 0) { 3669 if (adev->gfx.funcs->read_wave_vgprs) 3670 adev->gfx.funcs->read_wave_vgprs(adev, 
simd, wave, thread, offset, size>>2, data);
3671 	} else {
3672 		if (adev->gfx.funcs->read_wave_sgprs)
3673 			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3674 	}
3675 
3676 	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3677 	mutex_unlock(&adev->grbm_idx_mutex);
3678 
3679 	while (size) {
3680 		uint32_t value;
3681 
3682 		value = data[offset++];
3683 		r = put_user(value, (uint32_t *)buf);
3684 		if (r) {
3685 			result = r;
3686 			goto err;
3687 		}
3688 
3689 		result += 4;
3690 		buf += 4;
3691 		size -= 4;
3692 	}
3693 
3694 err:
3695 	kfree(data);
3696 	return result;
3697 }
3698 
3699 static const struct file_operations amdgpu_debugfs_regs_fops = {
3700 	.owner = THIS_MODULE,
3701 	.read = amdgpu_debugfs_regs_read,
3702 	.write = amdgpu_debugfs_regs_write,
3703 	.llseek = default_llseek
3704 };
3705 static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3706 	.owner = THIS_MODULE,
3707 	.read = amdgpu_debugfs_regs_didt_read,
3708 	.write = amdgpu_debugfs_regs_didt_write,
3709 	.llseek = default_llseek
3710 };
3711 static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3712 	.owner = THIS_MODULE,
3713 	.read = amdgpu_debugfs_regs_pcie_read,
3714 	.write = amdgpu_debugfs_regs_pcie_write,
3715 	.llseek = default_llseek
3716 };
3717 static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3718 	.owner = THIS_MODULE,
3719 	.read = amdgpu_debugfs_regs_smc_read,
3720 	.write = amdgpu_debugfs_regs_smc_write,
3721 	.llseek = default_llseek
3722 };
3723 
3724 static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3725 	.owner = THIS_MODULE,
3726 	.read = amdgpu_debugfs_gca_config_read,
3727 	.llseek = default_llseek
3728 };
3729 
3730 static const struct file_operations amdgpu_debugfs_sensors_fops = {
3731 	.owner = THIS_MODULE,
3732 	.read = amdgpu_debugfs_sensor_read,
3733 	.llseek = default_llseek
3734 };
3735 
3736 static const struct file_operations amdgpu_debugfs_wave_fops = {
3737 	.owner = THIS_MODULE,
3738 	.read = amdgpu_debugfs_wave_read,
3739 	.llseek = default_llseek
3740 };
3741 static const struct file_operations amdgpu_debugfs_gpr_fops = {
3742 	.owner = THIS_MODULE,
3743 	.read = amdgpu_debugfs_gpr_read,
3744 	.llseek = default_llseek
3745 };
3746 
3747 static const struct file_operations *debugfs_regs[] = {
3748 	&amdgpu_debugfs_regs_fops,
3749 	&amdgpu_debugfs_regs_didt_fops,
3750 	&amdgpu_debugfs_regs_pcie_fops,
3751 	&amdgpu_debugfs_regs_smc_fops,
3752 	&amdgpu_debugfs_gca_config_fops,
3753 	&amdgpu_debugfs_sensors_fops,
3754 	&amdgpu_debugfs_wave_fops,
3755 	&amdgpu_debugfs_gpr_fops,
3756 };
3757 
3758 static const char *debugfs_regs_names[] = {
3759 	"amdgpu_regs",
3760 	"amdgpu_regs_didt",
3761 	"amdgpu_regs_pcie",
3762 	"amdgpu_regs_smc",
3763 	"amdgpu_gca_config",
3764 	"amdgpu_sensors",
3765 	"amdgpu_wave",
3766 	"amdgpu_gpr",
3767 };
3768 
3769 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3770 {
3771 	struct drm_minor *minor = adev->ddev->primary;
3772 	struct dentry *ent, *root = minor->debugfs_root;
3773 	unsigned i, j;
3774 
3775 	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3776 		ent = debugfs_create_file(debugfs_regs_names[i],
3777 					  S_IFREG | S_IRUGO, root,
3778 					  adev, debugfs_regs[i]);
3779 		if (IS_ERR(ent)) {
3780 			for (j = 0; j < i; j++) {
3781 				debugfs_remove(adev->debugfs_regs[j]);
3782 				adev->debugfs_regs[j] = NULL;
3783 			}
3784 			return PTR_ERR(ent);
3785 		}
3786 
3787 		if (!i)
3788 			i_size_write(ent->d_inode, adev->rmmio_size);
3789 		adev->debugfs_regs[i] = ent;
3790 	}
3791 
3792 	return 0;
3793 }
3794 
3795 static void
amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) 3796 { 3797 unsigned i; 3798 3799 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { 3800 if (adev->debugfs_regs[i]) { 3801 debugfs_remove(adev->debugfs_regs[i]); 3802 adev->debugfs_regs[i] = NULL; 3803 } 3804 } 3805 } 3806 3807 static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) 3808 { 3809 struct drm_info_node *node = (struct drm_info_node *) m->private; 3810 struct drm_device *dev = node->minor->dev; 3811 struct amdgpu_device *adev = dev->dev_private; 3812 int r = 0, i; 3813 3814 /* hold on the scheduler */ 3815 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 3816 struct amdgpu_ring *ring = adev->rings[i]; 3817 3818 if (!ring || !ring->sched.thread) 3819 continue; 3820 kthread_park(ring->sched.thread); 3821 } 3822 3823 seq_printf(m, "run ib test:\n"); 3824 r = amdgpu_ib_ring_tests(adev); 3825 if (r) 3826 seq_printf(m, "ib ring tests failed (%d).\n", r); 3827 else 3828 seq_printf(m, "ib ring tests passed.\n"); 3829 3830 /* go on the scheduler */ 3831 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 3832 struct amdgpu_ring *ring = adev->rings[i]; 3833 3834 if (!ring || !ring->sched.thread) 3835 continue; 3836 kthread_unpark(ring->sched.thread); 3837 } 3838 3839 return 0; 3840 } 3841 3842 static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = { 3843 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib} 3844 }; 3845 3846 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev) 3847 { 3848 return amdgpu_debugfs_add_files(adev, 3849 amdgpu_debugfs_test_ib_ring_list, 1); 3850 } 3851 3852 int amdgpu_debugfs_init(struct drm_minor *minor) 3853 { 3854 return 0; 3855 } 3856 3857 static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data) 3858 { 3859 struct drm_info_node *node = (struct drm_info_node *) m->private; 3860 struct drm_device *dev = node->minor->dev; 3861 struct amdgpu_device *adev = dev->dev_private; 3862 3863 seq_write(m, adev->bios, adev->bios_size); 3864 return 0; 3865 } 3866 3867 static const struct drm_info_list amdgpu_vbios_dump_list[] = { 3868 {"amdgpu_vbios", 3869 amdgpu_debugfs_get_vbios_dump, 3870 0, NULL}, 3871 }; 3872 3873 static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev) 3874 { 3875 return amdgpu_debugfs_add_files(adev, 3876 amdgpu_vbios_dump_list, 1); 3877 } 3878 #else 3879 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev) 3880 { 3881 return 0; 3882 } 3883 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) 3884 { 3885 return 0; 3886 } 3887 static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev) 3888 { 3889 return 0; 3890 } 3891 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } 3892 #endif 3893