/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"

/*
 * Clear GPU surface registers.
 * Only pre-R600 parts have the legacy SURFACEn_INFO registers; zeroing
 * SURFACE_CNTL leaves surface handling in its default (disabled) state.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		/* clear all 8 surface info registers; they are evenly
		 * spaced, so the stride is SURFACE1_INFO - SURFACE0_INFO */
		for (i = 0; i < 8; i++) {
			WREG32(RADEON_SURFACE0_INFO +
			       i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
			       0);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch registers helpers function.
 *
 * Scratch registers are a small pool of consecutive 32-bit MMIO registers
 * used for fence/sync writebacks.  The free[] bitmap below tracks which
 * ones are currently handed out.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	/* registers are consecutive dwords starting at SCRATCH_REG0 */
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
	}
}

/*
 * Allocate one free scratch register.
 * On success stores the register offset in @reg and returns 0;
 * returns -EINVAL when the whole pool is in use.
 */
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

/*
 * Return a scratch register (previously handed out by radeon_scratch_get)
 * to the free pool.  Unknown offsets are silently ignored.
 */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

/*
 * MC common functions
 */
/*
 * Lay out VRAM and GTT apertures in the memory controller's address space.
 * 0xFFFFFFFF in vram_location/gtt_location means "not placed yet"; whichever
 * of the two was already fixed by earlier init constrains where the other
 * one goes.  Fills in vram_start/end and gtt_start/end on success.
 * Returns 0 on success, -EINVAL when the two apertures cannot both fit
 * in the 32-bit address space.
 */
int radeon_mc_setup(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* Some chips have an "issue" with the memory controller, the
	 * location must be aligned to the size. We just align it down,
	 * too bad if we walk over the top of system memory, we don't
	 * use DMA without a remapped anyway.
	 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
	 */
	/* FGLRX seems to setup like this, VRAM a 0, then GART.
	 */
	/*
	 * Note: from R6xx the address space is 40bits but here we only
	 * use 32bits (still have to see a card which would exhaust 4G
	 * address space).
	 */
	/* NOTE(review): the round-up computations below assume gtt_size and
	 * mc_vram_size are powers of two -- confirm callers guarantee this */
	if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
		/* vram location was already setup try to put gtt after
		 * if it fits */
		tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
		/* align the GTT base up to a multiple of its own size */
		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
			rdev->mc.gtt_location = tmp;
		} else {
			/* no room after VRAM; try to fit GTT at 0, before it */
			if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
				printk(KERN_ERR "[drm] GTT too big to fit "
				       "before or after vram location.\n");
				return -EINVAL;
			}
			rdev->mc.gtt_location = 0;
		}
	} else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
		/* gtt location was already setup try to put vram before
		 * if it fits */
		if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
			rdev->mc.vram_location = 0;
		} else {
			/* place VRAM after the GTT, aligned to its size */
			tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
			tmp += (rdev->mc.mc_vram_size - 1);
			tmp &= ~(rdev->mc.mc_vram_size - 1);
			if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
				rdev->mc.vram_location = tmp;
			} else {
				printk(KERN_ERR "[drm] vram too big to fit "
				       "before or after GTT location.\n");
				return -EINVAL;
			}
		}
	} else {
		/* neither placed yet: VRAM at 0, GTT right after it,
		 * aligned to the GTT size */
		rdev->mc.vram_location = 0;
		tmp = rdev->mc.mc_vram_size;
		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
		rdev->mc.gtt_location = tmp;
	}
	rdev->mc.vram_start = rdev->mc.vram_location;
	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	rdev->mc.gtt_start = rdev->mc.gtt_location;
	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
	DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
		 (unsigned)rdev->mc.vram_location,
		 (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
	DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
	DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
		 (unsigned)rdev->mc.gtt_location,
		 (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
	return 0;
}


/*
 * GPU helpers function.
 */
/*
 * Heuristically decide whether the BIOS has already posted (initialized)
 * the card: a card is considered posted if any CRTC is enabled, or --
 * in case all CRTCs are off -- if the memory-size register is non-zero.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

/*
 * Allocate and DMA-map a single zeroed page used as the GART's
 * "dummy" backing page for unbound entries.
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* GFP_DMA32: page must be addressable by 32-bit DMA */
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	/* NOTE(review): mapping failure is detected by a zero dma address
	 * here; pci_dma_mapping_error() would be the canonical check */
	if (!rdev->dummy_page.addr) {
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/*
 * Unmap and free the dummy page.  Safe to call when the page was never
 * allocated (or was already freed) -- it is a no-op then.
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/*
 * Registers accessors functions.
 */
/*
 * Catch-all read accessor installed for register spaces a given ASIC
 * does not have; reaching it is a driver bug, hence the BUG_ON.
 */
uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG_ON(1);
	return 0;
}

/*
 * Catch-all write accessor; see radeon_invalid_rreg().
 */
void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG_ON(1);
}

/*
 * Install the per-family MC/PLL/PCIEP register accessors.
 *
 * Everything starts as "invalid" and is then overridden by successively
 * more specific family checks, so the ORDER of the if-blocks below is
 * significant: a later match intentionally replaces an earlier one
 * (e.g. RS400/RS480 replaces the generic >= R420 MC accessors).
 */
void radeon_register_accessor_init(struct radeon_device *rdev)
{
	rdev->mc_rreg = &radeon_invalid_rreg;
	rdev->mc_wreg = &radeon_invalid_wreg;
	rdev->pll_rreg = &radeon_invalid_rreg;
	rdev->pll_wreg = &radeon_invalid_wreg;
	rdev->pciep_rreg = &radeon_invalid_rreg;
	rdev->pciep_wreg = &radeon_invalid_wreg;

	/* Don't change order as we are overriding accessors. */
	if (rdev->family < CHIP_RV515) {
		rdev->pcie_reg_mask = 0xff;
	} else {
		rdev->pcie_reg_mask = 0x7ff;
	}
	/* FIXME: not sure here */
	if (rdev->family <= CHIP_R580) {
		rdev->pll_rreg = &r100_pll_rreg;
		rdev->pll_wreg = &r100_pll_wreg;
	}
	if (rdev->family >= CHIP_R420) {
		rdev->mc_rreg = &r420_mc_rreg;
		rdev->mc_wreg = &r420_mc_wreg;
	}
	if (rdev->family >= CHIP_RV515) {
		rdev->mc_rreg = &rv515_mc_rreg;
		rdev->mc_wreg = &rv515_mc_wreg;
	}
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
		rdev->mc_rreg = &rs400_mc_rreg;
		rdev->mc_wreg = &rs400_mc_wreg;
	}
	if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		rdev->mc_rreg = &rs690_mc_rreg;
		rdev->mc_wreg = &rs690_mc_wreg;
	}
	if (rdev->family == CHIP_RS600) {
		rdev->mc_rreg = &rs600_mc_rreg;
		rdev->mc_wreg = &rs600_mc_wreg;
	}
	if (rdev->family >= CHIP_R600) {
		rdev->pciep_rreg = &r600_pciep_rreg;
		rdev->pciep_wreg = &r600_pciep_wreg;
	}
}


/*
 * ASIC
 */
/*
 * Pick the asic function table matching rdev->family and install the
 * register accessors.  R3xx PCIE parts additionally get their GART
 * callbacks swapped for the rv370 PCIE GART implementation.
 * Returns 0 on success, -EINVAL for unsupported families.
 */
int radeon_asic_init(struct radeon_device *rdev)
{
	radeon_register_accessor_init(rdev);
	switch (rdev->family) {
	case CHIP_R100:
	case CHIP_RV100:
	case CHIP_RS100:
	case CHIP_RV200:
	case CHIP_RS200:
	case CHIP_R200:
	case CHIP_RV250:
	case CHIP_RS300:
	case CHIP_RV280:
		rdev->asic = &r100_asic;
		break;
	case CHIP_R300:
	case CHIP_R350:
	case CHIP_RV350:
	case CHIP_RV380:
		rdev->asic = &r300_asic;
		if (rdev->flags & RADEON_IS_PCIE) {
			rdev->asic->gart_init = &rv370_pcie_gart_init;
			rdev->asic->gart_fini = &rv370_pcie_gart_fini;
			rdev->asic->gart_enable = &rv370_pcie_gart_enable;
			rdev->asic->gart_disable = &rv370_pcie_gart_disable;
			rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
			rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
		}
		break;
	case CHIP_R420:
	case CHIP_R423:
	case CHIP_RV410:
		rdev->asic = &r420_asic;
		break;
	case CHIP_RS400:
	case CHIP_RS480:
		rdev->asic = &rs400_asic;
		break;
	case CHIP_RS600:
		rdev->asic = &rs600_asic;
		break;
	case CHIP_RS690:
	case CHIP_RS740:
		rdev->asic = &rs690_asic;
		break;
	case CHIP_RV515:
		rdev->asic = &rv515_asic;
		break;
	case CHIP_R520:
	case CHIP_RV530:
	case CHIP_RV560:
	case CHIP_RV570:
	case CHIP_R580:
		rdev->asic = &r520_asic;
		break;
	case CHIP_R600:
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->asic = &r600_asic;
		break;
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
		rdev->asic = &rv770_asic;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}
	return 0;
}


/*
 * Wrapper around modesetting bits.
 */
385 */ 386 int radeon_clocks_init(struct radeon_device *rdev) 387 { 388 int r; 389 390 r = radeon_static_clocks_init(rdev->ddev); 391 if (r) { 392 return r; 393 } 394 DRM_INFO("Clocks initialized !\n"); 395 return 0; 396 } 397 398 void radeon_clocks_fini(struct radeon_device *rdev) 399 { 400 } 401 402 /* ATOM accessor methods */ 403 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) 404 { 405 struct radeon_device *rdev = info->dev->dev_private; 406 uint32_t r; 407 408 r = rdev->pll_rreg(rdev, reg); 409 return r; 410 } 411 412 static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) 413 { 414 struct radeon_device *rdev = info->dev->dev_private; 415 416 rdev->pll_wreg(rdev, reg, val); 417 } 418 419 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) 420 { 421 struct radeon_device *rdev = info->dev->dev_private; 422 uint32_t r; 423 424 r = rdev->mc_rreg(rdev, reg); 425 return r; 426 } 427 428 static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) 429 { 430 struct radeon_device *rdev = info->dev->dev_private; 431 432 rdev->mc_wreg(rdev, reg, val); 433 } 434 435 static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) 436 { 437 struct radeon_device *rdev = info->dev->dev_private; 438 439 WREG32(reg*4, val); 440 } 441 442 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) 443 { 444 struct radeon_device *rdev = info->dev->dev_private; 445 uint32_t r; 446 447 r = RREG32(reg*4); 448 return r; 449 } 450 451 static struct card_info atom_card_info = { 452 .dev = NULL, 453 .reg_read = cail_reg_read, 454 .reg_write = cail_reg_write, 455 .mc_read = cail_mc_read, 456 .mc_write = cail_mc_write, 457 .pll_read = cail_pll_read, 458 .pll_write = cail_pll_write, 459 }; 460 461 int radeon_atombios_init(struct radeon_device *rdev) 462 { 463 atom_card_info.dev = rdev->ddev; 464 rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios); 465 
radeon_atom_initialize_bios_scratch_regs(rdev->ddev); 466 return 0; 467 } 468 469 void radeon_atombios_fini(struct radeon_device *rdev) 470 { 471 kfree(rdev->mode_info.atom_context); 472 } 473 474 int radeon_combios_init(struct radeon_device *rdev) 475 { 476 radeon_combios_initialize_bios_scratch_regs(rdev->ddev); 477 return 0; 478 } 479 480 void radeon_combios_fini(struct radeon_device *rdev) 481 { 482 } 483 484 /* if we get transitioned to only one device, tak VGA back */ 485 static unsigned int radeon_vga_set_decode(void *cookie, bool state) 486 { 487 struct radeon_device *rdev = cookie; 488 489 radeon_vga_set_state(rdev, state); 490 if (state) 491 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 492 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 493 else 494 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 495 } 496 /* 497 * Radeon device. 498 */ 499 int radeon_device_init(struct radeon_device *rdev, 500 struct drm_device *ddev, 501 struct pci_dev *pdev, 502 uint32_t flags) 503 { 504 int r; 505 int dma_bits; 506 507 DRM_INFO("radeon: Initializing kernel modesetting.\n"); 508 rdev->shutdown = false; 509 rdev->dev = &pdev->dev; 510 rdev->ddev = ddev; 511 rdev->pdev = pdev; 512 rdev->flags = flags; 513 rdev->family = flags & RADEON_FAMILY_MASK; 514 rdev->is_atom_bios = false; 515 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT; 516 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 517 rdev->gpu_lockup = false; 518 rdev->accel_working = false; 519 /* mutex initialization are all done here so we 520 * can recall function without having locking issues */ 521 mutex_init(&rdev->cs_mutex); 522 mutex_init(&rdev->ib_pool.mutex); 523 mutex_init(&rdev->cp.mutex); 524 rwlock_init(&rdev->fence_drv.lock); 525 INIT_LIST_HEAD(&rdev->gem.objects); 526 527 /* Set asic functions */ 528 r = radeon_asic_init(rdev); 529 if (r) { 530 return r; 531 } 532 533 if (radeon_agpmode == -1) { 534 rdev->flags &= ~RADEON_IS_AGP; 535 if (rdev->family >= CHIP_R600) { 536 DRM_INFO("Forcing AGP to PCIE 
mode\n"); 537 rdev->flags |= RADEON_IS_PCIE; 538 } else if (rdev->family >= CHIP_RV515 || 539 rdev->family == CHIP_RV380 || 540 rdev->family == CHIP_RV410 || 541 rdev->family == CHIP_R423) { 542 DRM_INFO("Forcing AGP to PCIE mode\n"); 543 rdev->flags |= RADEON_IS_PCIE; 544 rdev->asic->gart_init = &rv370_pcie_gart_init; 545 rdev->asic->gart_fini = &rv370_pcie_gart_fini; 546 rdev->asic->gart_enable = &rv370_pcie_gart_enable; 547 rdev->asic->gart_disable = &rv370_pcie_gart_disable; 548 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; 549 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; 550 } else { 551 DRM_INFO("Forcing AGP to PCI mode\n"); 552 rdev->flags |= RADEON_IS_PCI; 553 rdev->asic->gart_init = &r100_pci_gart_init; 554 rdev->asic->gart_fini = &r100_pci_gart_fini; 555 rdev->asic->gart_enable = &r100_pci_gart_enable; 556 rdev->asic->gart_disable = &r100_pci_gart_disable; 557 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; 558 rdev->asic->gart_set_page = &r100_pci_gart_set_page; 559 } 560 } 561 562 /* set DMA mask + need_dma32 flags. 563 * PCIE - can handle 40-bits. 564 * IGP - can handle 40-bits (in theory) 565 * AGP - generally dma32 is safest 566 * PCI - only dma32 567 */ 568 rdev->need_dma32 = false; 569 if (rdev->flags & RADEON_IS_AGP) 570 rdev->need_dma32 = true; 571 if (rdev->flags & RADEON_IS_PCI) 572 rdev->need_dma32 = true; 573 574 dma_bits = rdev->need_dma32 ? 
32 : 40; 575 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); 576 if (r) { 577 printk(KERN_WARNING "radeon: No suitable DMA available.\n"); 578 } 579 580 /* Registers mapping */ 581 /* TODO: block userspace mapping of io register */ 582 rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2); 583 rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2); 584 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size); 585 if (rdev->rmmio == NULL) { 586 return -ENOMEM; 587 } 588 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); 589 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); 590 591 rdev->new_init_path = false; 592 r = radeon_init(rdev); 593 if (r) { 594 return r; 595 } 596 597 /* if we have > 1 VGA cards, then disable the radeon VGA resources */ 598 r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 599 if (r) { 600 return -EINVAL; 601 } 602 603 if (!rdev->new_init_path) { 604 /* Setup errata flags */ 605 radeon_errata(rdev); 606 /* Initialize scratch registers */ 607 radeon_scratch_init(rdev); 608 /* Initialize surface registers */ 609 radeon_surface_init(rdev); 610 611 /* BIOS*/ 612 if (!radeon_get_bios(rdev)) { 613 if (ASIC_IS_AVIVO(rdev)) 614 return -EINVAL; 615 } 616 if (rdev->is_atom_bios) { 617 r = radeon_atombios_init(rdev); 618 if (r) { 619 return r; 620 } 621 } else { 622 r = radeon_combios_init(rdev); 623 if (r) { 624 return r; 625 } 626 } 627 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 628 if (radeon_gpu_reset(rdev)) { 629 /* FIXME: what do we want to do here ? */ 630 } 631 /* check if cards are posted or not */ 632 if (!radeon_card_posted(rdev) && rdev->bios) { 633 DRM_INFO("GPU not posted. 
posting now...\n"); 634 if (rdev->is_atom_bios) { 635 atom_asic_init(rdev->mode_info.atom_context); 636 } else { 637 radeon_combios_asic_init(rdev->ddev); 638 } 639 } 640 /* Get clock & vram information */ 641 radeon_get_clock_info(rdev->ddev); 642 radeon_vram_info(rdev); 643 /* Initialize clocks */ 644 r = radeon_clocks_init(rdev); 645 if (r) { 646 return r; 647 } 648 649 /* Initialize memory controller (also test AGP) */ 650 r = radeon_mc_init(rdev); 651 if (r) { 652 return r; 653 } 654 /* Fence driver */ 655 r = radeon_fence_driver_init(rdev); 656 if (r) { 657 return r; 658 } 659 r = radeon_irq_kms_init(rdev); 660 if (r) { 661 return r; 662 } 663 /* Memory manager */ 664 r = radeon_object_init(rdev); 665 if (r) { 666 return r; 667 } 668 r = radeon_gpu_gart_init(rdev); 669 if (r) 670 return r; 671 /* Initialize GART (initialize after TTM so we can allocate 672 * memory through TTM but finalize after TTM) */ 673 r = radeon_gart_enable(rdev); 674 if (r) 675 return 0; 676 r = radeon_gem_init(rdev); 677 if (r) 678 return 0; 679 680 /* 1M ring buffer */ 681 r = radeon_cp_init(rdev, 1024 * 1024); 682 if (r) 683 return 0; 684 r = radeon_wb_init(rdev); 685 if (r) 686 DRM_ERROR("radeon: failled initializing WB (%d).\n", r); 687 r = radeon_ib_pool_init(rdev); 688 if (r) 689 return 0; 690 r = radeon_ib_test(rdev); 691 if (r) 692 return 0; 693 rdev->accel_working = true; 694 } 695 DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); 696 if (radeon_testing) { 697 radeon_test_moves(rdev); 698 } 699 if (radeon_benchmarking) { 700 radeon_benchmark(rdev); 701 } 702 return 0; 703 } 704 705 void radeon_device_fini(struct radeon_device *rdev) 706 { 707 DRM_INFO("radeon: finishing device.\n"); 708 rdev->shutdown = true; 709 /* Order matter so becarefull if you rearrange anythings */ 710 if (!rdev->new_init_path) { 711 radeon_ib_pool_fini(rdev); 712 radeon_cp_fini(rdev); 713 radeon_wb_fini(rdev); 714 radeon_gpu_gart_fini(rdev); 715 radeon_gem_fini(rdev); 716 
radeon_mc_fini(rdev); 717 #if __OS_HAS_AGP 718 radeon_agp_fini(rdev); 719 #endif 720 radeon_irq_kms_fini(rdev); 721 vga_client_register(rdev->pdev, NULL, NULL, NULL); 722 radeon_fence_driver_fini(rdev); 723 radeon_clocks_fini(rdev); 724 radeon_object_fini(rdev); 725 if (rdev->is_atom_bios) { 726 radeon_atombios_fini(rdev); 727 } else { 728 radeon_combios_fini(rdev); 729 } 730 kfree(rdev->bios); 731 rdev->bios = NULL; 732 } else { 733 radeon_fini(rdev); 734 } 735 iounmap(rdev->rmmio); 736 rdev->rmmio = NULL; 737 } 738 739 740 /* 741 * Suspend & resume. 742 */ 743 int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) 744 { 745 struct radeon_device *rdev = dev->dev_private; 746 struct drm_crtc *crtc; 747 748 if (dev == NULL || rdev == NULL) { 749 return -ENODEV; 750 } 751 if (state.event == PM_EVENT_PRETHAW) { 752 return 0; 753 } 754 /* unpin the front buffers */ 755 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 756 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); 757 struct radeon_object *robj; 758 759 if (rfb == NULL || rfb->obj == NULL) { 760 continue; 761 } 762 robj = rfb->obj->driver_private; 763 if (robj != rdev->fbdev_robj) { 764 radeon_object_unpin(robj); 765 } 766 } 767 /* evict vram memory */ 768 radeon_object_evict_vram(rdev); 769 /* wait for gpu to finish processing current batch */ 770 radeon_fence_wait_last(rdev); 771 772 radeon_save_bios_scratch_regs(rdev); 773 774 if (!rdev->new_init_path) { 775 radeon_cp_disable(rdev); 776 radeon_gart_disable(rdev); 777 rdev->irq.sw_int = false; 778 radeon_irq_set(rdev); 779 } else { 780 radeon_suspend(rdev); 781 } 782 /* evict remaining vram memory */ 783 radeon_object_evict_vram(rdev); 784 785 pci_save_state(dev->pdev); 786 if (state.event == PM_EVENT_SUSPEND) { 787 /* Shut down the device */ 788 pci_disable_device(dev->pdev); 789 pci_set_power_state(dev->pdev, PCI_D3hot); 790 } 791 acquire_console_sem(); 792 fb_set_suspend(rdev->fbdev_info, 1); 793 
release_console_sem(); 794 return 0; 795 } 796 797 int radeon_resume_kms(struct drm_device *dev) 798 { 799 struct radeon_device *rdev = dev->dev_private; 800 int r; 801 802 acquire_console_sem(); 803 pci_set_power_state(dev->pdev, PCI_D0); 804 pci_restore_state(dev->pdev); 805 if (pci_enable_device(dev->pdev)) { 806 release_console_sem(); 807 return -1; 808 } 809 pci_set_master(dev->pdev); 810 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 811 if (!rdev->new_init_path) { 812 if (radeon_gpu_reset(rdev)) { 813 /* FIXME: what do we want to do here ? */ 814 } 815 /* post card */ 816 if (rdev->is_atom_bios) { 817 atom_asic_init(rdev->mode_info.atom_context); 818 } else { 819 radeon_combios_asic_init(rdev->ddev); 820 } 821 /* Initialize clocks */ 822 r = radeon_clocks_init(rdev); 823 if (r) { 824 release_console_sem(); 825 return r; 826 } 827 /* Enable IRQ */ 828 rdev->irq.sw_int = true; 829 radeon_irq_set(rdev); 830 /* Initialize GPU Memory Controller */ 831 r = radeon_mc_init(rdev); 832 if (r) { 833 goto out; 834 } 835 r = radeon_gart_enable(rdev); 836 if (r) { 837 goto out; 838 } 839 r = radeon_cp_init(rdev, rdev->cp.ring_size); 840 if (r) { 841 goto out; 842 } 843 } else { 844 radeon_resume(rdev); 845 } 846 out: 847 radeon_restore_bios_scratch_regs(rdev); 848 fb_set_suspend(rdev->fbdev_info, 0); 849 release_console_sem(); 850 851 /* blat the mode back in */ 852 drm_helper_resume_force_mode(dev); 853 return 0; 854 } 855 856 857 /* 858 * Debugfs 859 */ 860 struct radeon_debugfs { 861 struct drm_info_list *files; 862 unsigned num_files; 863 }; 864 static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES]; 865 static unsigned _radeon_debugfs_count = 0; 866 867 int radeon_debugfs_add_files(struct radeon_device *rdev, 868 struct drm_info_list *files, 869 unsigned nfiles) 870 { 871 unsigned i; 872 873 for (i = 0; i < _radeon_debugfs_count; i++) { 874 if (_radeon_debugfs[i].files == files) { 875 /* Already registered */ 876 return 0; 
877 } 878 } 879 if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) { 880 DRM_ERROR("Reached maximum number of debugfs files.\n"); 881 DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n"); 882 return -EINVAL; 883 } 884 _radeon_debugfs[_radeon_debugfs_count].files = files; 885 _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles; 886 _radeon_debugfs_count++; 887 #if defined(CONFIG_DEBUG_FS) 888 drm_debugfs_create_files(files, nfiles, 889 rdev->ddev->control->debugfs_root, 890 rdev->ddev->control); 891 drm_debugfs_create_files(files, nfiles, 892 rdev->ddev->primary->debugfs_root, 893 rdev->ddev->primary); 894 #endif 895 return 0; 896 } 897 898 #if defined(CONFIG_DEBUG_FS) 899 int radeon_debugfs_init(struct drm_minor *minor) 900 { 901 return 0; 902 } 903 904 void radeon_debugfs_cleanup(struct drm_minor *minor) 905 { 906 unsigned i; 907 908 for (i = 0; i < _radeon_debugfs_count; i++) { 909 drm_debugfs_remove_files(_radeon_debugfs[i].files, 910 _radeon_debugfs[i].num_files, minor); 911 } 912 } 913 #endif 914