/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < 8; i++) {
			WREG32(RADEON_SURFACE0_INFO +
			       i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
			       0);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch register helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
	}
}

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

/*
 * MC common functions
 */
int radeon_mc_setup(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* Some chips have an "issue" with the memory controller, the
	 * location must be aligned to the size. We just align it down,
	 * too bad if we walk over the top of system memory, we don't
	 * use DMA without a remapper anyway.
	 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP.
	 */
	/* FGLRX seems to set it up like this: VRAM at 0, then GART.
	 */
	/*
	 * Note: from R6xx the address space is 40 bits, but here we only
	 * use 32 bits (still have to see a card which would exhaust the
	 * 4G address space).
	 */
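	/*
	 * The placement below relies on the usual power-of-two round-up,
	 * (addr + size - 1) & ~(size - 1), which assumes gtt_size and
	 * mc_vram_size are powers of two.  Illustrative (hypothetical)
	 * numbers: with VRAM at 0 sized 256MB (end 0x10000000) and a
	 * 512MB GTT, (0x10000000 + 0x1FFFFFFF) & ~0x1FFFFFFF yields a
	 * GTT location of 0x20000000, i.e. the first 512MB-aligned
	 * address at or past the end of VRAM.
	 */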
	if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
		/* vram location was already set up; try to put gtt after
		 * it if it fits */
		tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
			rdev->mc.gtt_location = tmp;
		} else {
			if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
				printk(KERN_ERR "[drm] GTT too big to fit "
				       "before or after vram location.\n");
				return -EINVAL;
			}
			rdev->mc.gtt_location = 0;
		}
	} else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
		/* gtt location was already set up; try to put vram before
		 * it if it fits */
		if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
			rdev->mc.vram_location = 0;
		} else {
			tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
			tmp += (rdev->mc.mc_vram_size - 1);
			tmp &= ~(rdev->mc.mc_vram_size - 1);
			if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
				rdev->mc.vram_location = tmp;
			} else {
				printk(KERN_ERR "[drm] vram too big to fit "
				       "before or after GTT location.\n");
				return -EINVAL;
			}
		}
	} else {
		rdev->mc.vram_location = 0;
		tmp = rdev->mc.mc_vram_size;
		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
		rdev->mc.gtt_location = tmp;
	}
	rdev->mc.vram_start = rdev->mc.vram_location;
	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	rdev->mc.gtt_start = rdev->mc.gtt_location;
	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
	DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
		 (unsigned)rdev->mc.vram_location,
		 (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
	DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
	DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
		 (unsigned)rdev->mc.gtt_location,
		 (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
	return 0;
}


/*
 * GPU helper functions.
 */
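/*
 * "Posted" here means the card has already been initialized by the
 * BIOS/firmware.  The check below is a heuristic: the card is assumed
 * posted if either a CRTC is enabled or the memory-size register reads
 * non-zero; on a card that has never been POSTed both tests come back
 * clear.
 */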
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}

int radeon_dummy_page_init(struct radeon_device *rdev)
{
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (!rdev->dummy_page.addr) {
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/*
 * Register accessor functions.
 */
uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG_ON(1);
	return 0;
}

void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG_ON(1);
}

void radeon_register_accessor_init(struct radeon_device *rdev)
{
	rdev->mc_rreg = &radeon_invalid_rreg;
	rdev->mc_wreg = &radeon_invalid_wreg;
	rdev->pll_rreg = &radeon_invalid_rreg;
	rdev->pll_wreg = &radeon_invalid_wreg;
	rdev->pciep_rreg = &radeon_invalid_rreg;
	rdev->pciep_wreg = &radeon_invalid_wreg;

	/* Don't change the order, as we are overriding the accessors. */
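	/*
	 * Later assignments intentionally win: e.g. an RV515-class chip
	 * matches both the ">= CHIP_R420" and ">= CHIP_RV515" checks below
	 * and ends up with the rv515 MC accessors because that block runs
	 * last.  Reordering the checks would silently change which
	 * accessors a given family gets.
	 */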
	if (rdev->family < CHIP_RV515) {
		rdev->pcie_reg_mask = 0xff;
	} else {
		rdev->pcie_reg_mask = 0x7ff;
	}
	/* FIXME: not sure here */
	if (rdev->family <= CHIP_R580) {
		rdev->pll_rreg = &r100_pll_rreg;
		rdev->pll_wreg = &r100_pll_wreg;
	}
	if (rdev->family >= CHIP_R420) {
		rdev->mc_rreg = &r420_mc_rreg;
		rdev->mc_wreg = &r420_mc_wreg;
	}
	if (rdev->family >= CHIP_RV515) {
		rdev->mc_rreg = &rv515_mc_rreg;
		rdev->mc_wreg = &rv515_mc_wreg;
	}
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
		rdev->mc_rreg = &rs400_mc_rreg;
		rdev->mc_wreg = &rs400_mc_wreg;
	}
	if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		rdev->mc_rreg = &rs690_mc_rreg;
		rdev->mc_wreg = &rs690_mc_wreg;
	}
	if (rdev->family == CHIP_RS600) {
		rdev->mc_rreg = &rs600_mc_rreg;
		rdev->mc_wreg = &rs600_mc_wreg;
	}
	if (rdev->family >= CHIP_R600) {
		rdev->pciep_rreg = &r600_pciep_rreg;
		rdev->pciep_wreg = &r600_pciep_wreg;
	}
}


/*
 * ASIC
 */
int radeon_asic_init(struct radeon_device *rdev)
{
	radeon_register_accessor_init(rdev);
	switch (rdev->family) {
	case CHIP_R100:
	case CHIP_RV100:
	case CHIP_RS100:
	case CHIP_RV200:
	case CHIP_RS200:
	case CHIP_R200:
	case CHIP_RV250:
	case CHIP_RS300:
	case CHIP_RV280:
		rdev->asic = &r100_asic;
		break;
	case CHIP_R300:
	case CHIP_R350:
	case CHIP_RV350:
	case CHIP_RV380:
		rdev->asic = &r300_asic;
		if (rdev->flags & RADEON_IS_PCIE) {
			rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
			rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
		}
		break;
	case CHIP_R420:
	case CHIP_R423:
	case CHIP_RV410:
		rdev->asic = &r420_asic;
		break;
	case CHIP_RS400:
	case CHIP_RS480:
		rdev->asic = &rs400_asic;
		break;
	case CHIP_RS600:
		rdev->asic = &rs600_asic;
		break;
	case CHIP_RS690:
	case CHIP_RS740:
		rdev->asic = &rs690_asic;
		break;
	case CHIP_RV515:
		rdev->asic = &rv515_asic;
		break;
	case CHIP_R520:
	case CHIP_RV530:
	case CHIP_RV560:
	case CHIP_RV570:
	case CHIP_R580:
		rdev->asic = &r520_asic;
		break;
	case CHIP_R600:
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->asic = &r600_asic;
		break;
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
		rdev->asic = &rv770_asic;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}
	return 0;
}


/*
 * Wrapper around modesetting bits.
 */
int radeon_clocks_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_static_clocks_init(rdev->ddev);
	if (r) {
		return r;
	}
	DRM_INFO("Clocks initialized!\n");
	return 0;
}

void radeon_clocks_fini(struct radeon_device *rdev)
{
}

/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* ATOM register offsets are in dwords; MMIO is byte addressed */
	WREG32(reg * 4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg * 4);
	return r;
}

int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	kfree(rdev->mode_info.atom_context);
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;

	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_agp_disable(struct radeon_device *rdev)
{
	rdev->flags &= ~RADEON_IS_AGP;
	if (rdev->family >= CHIP_R600) {
		DRM_INFO("Forcing AGP to PCIE mode\n");
		rdev->flags |= RADEON_IS_PCIE;
	} else if (rdev->family >= CHIP_RV515 ||
		   rdev->family == CHIP_RV380 ||
		   rdev->family == CHIP_RV410 ||
		   rdev->family == CHIP_R423) {
		DRM_INFO("Forcing AGP to PCIE mode\n");
		rdev->flags |= RADEON_IS_PCIE;
		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
	} else {
		DRM_INFO("Forcing AGP to PCI mode\n");
		rdev->flags |= RADEON_IS_PCI;
		rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
		rdev->asic->gart_set_page = &r100_pci_gart_set_page;
	}
}

/*
 * Radeon device.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r;
	int dma_bits;

	DRM_INFO("radeon: Initializing kernel modesetting.\n");
	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;
	/* mutex initialization is all done here so we can call these
	 * functions later without running into locking issues */
	mutex_init(&rdev->cs_mutex);
	mutex_init(&rdev->ib_pool.mutex);
	mutex_init(&rdev->cp.mutex);
	rwlock_init(&rdev->fence_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r) {
		return r;
	}

	if (radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits (in theory)
	 * AGP - generally dma32 is safest
	 * PCI - only dma32
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
	rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* if we have more than one VGA card, disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration is not working on this AGP card; try again
		 * with a fallback to PCI or PCIE GART.
		 */
		radeon_gpu_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if (radeon_testing) {
		radeon_test_moves(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev);
	}
	return 0;
}

void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	radeon_fini(rdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
}


/*
 * Suspend & resume.
 */
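/*
 * Rough order of operations on suspend, as implemented below: unpin any
 * scanout buffers that are not the fbdev framebuffer, evict VRAM, wait
 * for the GPU to go idle, save the BIOS scratch registers, call the
 * per-ASIC suspend hook, then (for a real suspend) put the PCI device
 * into D3hot.  Resume undoes this in roughly the reverse order.
 */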
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_crtc *crtc;

	if (dev == NULL || rdev == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_object *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = rfb->obj->driver_private;
		if (robj != rdev->fbdev_robj) {
			radeon_object_unpin(robj);
		}
	}
	/* evict vram memory */
	radeon_object_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	radeon_fence_wait_last(rdev);

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	/* evict remaining vram memory */
	radeon_object_evict_vram(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	acquire_console_sem();
	fb_set_suspend(rdev->fbdev_info, 1);
	release_console_sem();
	return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	acquire_console_sem();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		release_console_sem();
		return -1;
	}
	pci_set_master(dev->pdev);
	radeon_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);
	fb_set_suspend(rdev->fbdev_info, 0);
	release_console_sem();

	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	return 0;
}


/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list *files;
	unsigned num_files;
};
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		if (_radeon_debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}
	if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
		DRM_ERROR("Reached maximum number of debugfs files.\n");
		DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
		return -EINVAL;
	}
	_radeon_debugfs[_radeon_debugfs_count].files = files;
	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
	_radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		drm_debugfs_remove_files(_radeon_debugfs[i].files,
					 _radeon_debugfs[i].num_files, minor);
	}
}
#endif