/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rs400d.h"

/* This file gathers functions specific to: rs400, rs480 */
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rs400_gart_adjust_size(struct radeon_device *rdev)
{
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
        default:
                DRM_ERROR("Unable to use IGP GART size %uM\n",
                          (unsigned)(rdev->mc.gtt_size >> 20));
                DRM_ERROR("Valid GART sizes for IGP are 32M, 64M, 128M, 256M, 512M, 1G, 2G\n");
                DRM_ERROR("Forcing to 32M GART size\n");
                rdev->mc.gtt_size = 32 * 1024 * 1024;
                return;
        }
}

void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
        uint32_t tmp;
        unsigned int timeout = rdev->usec_timeout;

        /* Request a GART cache invalidate and wait for the bit to clear */
        WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
        do {
                tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
                if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
                        break;
                DRM_UDELAY(1);
                timeout--;
        } while (timeout > 0);
        WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}

int rs400_gart_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.table.ram.ptr) {
                WARN(1, "RS400 GART already initialized.\n");
                return 0;
        }
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
        default:
                return -EINVAL;
        }
        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r)
                return r;
        if (rs400_debugfs_pcie_gart_info_init(rdev))
                DRM_ERROR("Failed to register debugfs file for RS400 GART!\n");
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        return radeon_gart_table_ram_alloc(rdev);
}
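/*
 * Program and enable the IGP GART: pick the aperture size from the
 * requested GTT size, program the AGP base/location registers, write the
 * GART page-table base address, then enable the GART and flush its TLB.
 */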
int rs400_gart_enable(struct radeon_device *rdev)
{
        uint32_t size_reg;
        uint32_t tmp;

        radeon_gart_restore(rdev);
        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
                size_reg = RS480_VA_SIZE_32MB;
                break;
        case 64:
                size_reg = RS480_VA_SIZE_64MB;
                break;
        case 128:
                size_reg = RS480_VA_SIZE_128MB;
                break;
        case 256:
                size_reg = RS480_VA_SIZE_256MB;
                break;
        case 512:
                size_reg = RS480_VA_SIZE_512MB;
                break;
        case 1024:
                size_reg = RS480_VA_SIZE_1GB;
                break;
        case 2048:
                size_reg = RS480_VA_SIZE_2GB;
                break;
        default:
                return -EINVAL;
        }
        /* It should be fine to program it to max value */
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
                WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
        } else {
                WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
                WREG32(RS480_AGP_BASE_2, 0);
        }
        tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
        tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
                tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
                WREG32(RADEON_BUS_CNTL, tmp);
        } else {
                WREG32(RADEON_MC_AGP_LOCATION, tmp);
                tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
                WREG32(RADEON_BUS_CNTL, tmp);
        }
        /* Table should be in 32bits address space so ignore bits above. */
        tmp = (u32)rdev->gart.table_addr & 0xfffff000;
        tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;

        WREG32_MC(RS480_GART_BASE, tmp);
        /* TODO: more tweaking here */
        WREG32_MC(RS480_GART_FEATURE_ID,
                  (RS480_TLB_ENABLE |
                   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
        /* Disable snooping */
        WREG32_MC(RS480_AGP_MODE_CNTL,
                  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
        /* Disable AGP mode */
        /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
         * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS480_MC_MISC_CNTL,
                          (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
        } else {
                WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
        }
        /* Enable gart */
        WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
        rs400_gart_tlb_flush(rdev);
        rdev->gart.ready = true;
        return 0;
}

void rs400_gart_disable(struct radeon_device *rdev)
{
        uint32_t tmp;

        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
        WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}

void rs400_gart_fini(struct radeon_device *rdev)
{
        radeon_gart_fini(rdev);
        rs400_gart_disable(rdev);
        radeon_gart_table_ram_free(rdev);
}

int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        uint32_t entry;

        if (i < 0 || i >= rdev->gart.num_gpu_pages) {
                return -EINVAL;
        }

        /* PTE layout: page address bits 31:12, address bits 39:32 in
         * entry bits 11:4, plus the entry flag bits (0xc). */
        entry = (lower_32_bits(addr) & PAGE_MASK) |
                ((upper_32_bits(addr) & 0xff) << 4) |
                0xc;
        entry = cpu_to_le32(entry);
        rdev->gart.table.ram.ptr[i] = entry;
        return 0;
}

int rs400_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(0x0150);
                if (tmp & (1 << 2)) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}
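/* Basic GPU setup: reuse the R420 pipe setup and wait for the MC to go idle */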
void rs400_gpu_init(struct radeon_device *rdev)
{
        /* FIXME: is this correct ? */
        r420_pipes_init(rdev);
        if (rs400_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "rs400: Failed to wait MC idle while "
                       "programming pipes. Bad things might happen. %08x\n",
                       RREG32(0x150));
        }
}

void rs400_mc_init(struct radeon_device *rdev)
{
        u64 base;

        rs400_gart_adjust_size(rdev);
        rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
        /* DDR for all cards after R300 & IGP */
        rdev->mc.vram_is_ddr = true;
        rdev->mc.vram_width = 128;
        r100_vram_init_sizes(rdev);
        base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
        radeon_vram_location(rdev, &rdev->mc, base);
        rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
        radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
}

/* Indirect MC register access through the RS480 NB_MC index/data pair */
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
        uint32_t r;

        WREG32(RS480_NB_MC_INDEX, reg & 0xff);
        r = RREG32(RS480_NB_MC_DATA);
        WREG32(RS480_NB_MC_INDEX, 0xff);
        return r;
}

void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
        WREG32(RS480_NB_MC_DATA, (v));
        WREG32(RS480_NB_MC_INDEX, 0xff);
}
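/*
 * debugfs helper: dump the GART and AGP related registers so the GART
 * setup can be inspected from userspace (rs400_gart_info).
 */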
#if defined(CONFIG_DEBUG_FS)
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32(RADEON_HOST_PATH_CNTL);
        seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
        tmp = RREG32(RADEON_BUS_CNTL);
        seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
                seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
                tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
                seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
                tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
                seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
                tmp = RREG32_MC(0x100);
                seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
                tmp = RREG32(0x134);
                seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
        } else {
                tmp = RREG32(RADEON_AGP_BASE);
                seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
                tmp = RREG32(RS480_AGP_BASE_2);
                seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
                tmp = RREG32(RADEON_MC_AGP_LOCATION);
                seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
        }
        tmp = RREG32_MC(RS480_GART_BASE);
        seq_printf(m, "GART_BASE 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_GART_FEATURE_ID);
        seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
        seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_MC_MISC_CNTL);
        seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(0x5F);
        seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
        seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
        seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
        tmp = RREG32_MC(0x3B);
        seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
        tmp = RREG32_MC(0x3C);
        seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
        tmp = RREG32_MC(0x30);
        seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
        tmp = RREG32_MC(0x31);
        seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
        tmp = RREG32_MC(0x32);
        seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
        tmp = RREG32_MC(0x33);
        seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
        tmp = RREG32_MC(0x34);
        seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
        tmp = RREG32_MC(0x35);
        seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
        tmp = RREG32_MC(0x36);
        seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
        tmp = RREG32_MC(0x37);
        seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list rs400_gart_info_list[] = {
        {"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
#endif

static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
        return 0;
#endif
}

void rs400_mc_program(struct radeon_device *rdev)
{
        struct r100_mc_save save;

        /* Stop all MC clients */
        r100_mc_stop(rdev, &save);

        /* Wait for MC idle */
        if (rs400_mc_wait_for_idle(rdev))
                dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
        WREG32(R_000148_MC_FB_LOCATION,
               S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
               S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));

        r100_mc_resume(rdev, &save);
}

static int rs400_startup(struct radeon_device *rdev)
{
        int r;

        r100_set_common_regs(rdev);

        rs400_mc_program(rdev);
        /* Resume clock */
        r300_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        rs400_gpu_init(rdev);
        r100_enable_bm(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r = rs400_gart_enable(rdev);
        if (r)
                return r;
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r = r100_wb_init(rdev);
        if (r)
                dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
                return r;
        }
        return 0;
}
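/*
 * Resume: the GART is disabled and the card is re-posted through the
 * combios tables before rs400_startup() re-enables acceleration.
 */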
int rs400_resume(struct radeon_device *rdev)
{
        /* Make sure GART is not working */
        rs400_gart_disable(rdev);
        /* Resume clock before doing reset */
        r300_clock_startup(rdev);
        /* Setup MC before calling post tables */
        rs400_mc_program(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* post */
        radeon_combios_asic_init(rdev->ddev);
        /* Resume clock after posting */
        r300_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        return rs400_startup(rdev);
}

int rs400_suspend(struct radeon_device *rdev)
{
        r100_cp_disable(rdev);
        r100_wb_disable(rdev);
        r100_irq_disable(rdev);
        rs400_gart_disable(rdev);
        return 0;
}

void rs400_fini(struct radeon_device *rdev)
{
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        rs400_gart_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
}

int rs400_init(struct radeon_device *rdev)
{
        int r;

        /* Disable VGA */
        r100_vga_render_disable(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA need to use VGA request */
        /* Restore some registers to sane defaults */
        r100_restore_sanity(rdev);
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
                return -EINVAL;
        } else {
                r = radeon_combios_init(rdev);
                if (r)
                        return r;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* Check if the card is posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;

        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* Initialize memory controller */
        rs400_mc_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;
        r = rs400_gart_init(rdev);
        if (r)
                return r;
        r300_set_reg_safe(rdev);
        rdev->accel_working = true;
        r = rs400_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
                rs400_gart_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}