1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #include <linux/seq_file.h> 29 #include "drmP.h" 30 #include "drm.h" 31 #include "radeon_drm.h" 32 #include "radeon_reg.h" 33 #include "radeon.h" 34 #include "r100d.h" 35 #include "rs100d.h" 36 #include "rv200d.h" 37 #include "rv250d.h" 38 39 #include <linux/firmware.h> 40 #include <linux/platform_device.h> 41 42 #include "r100_reg_safe.h" 43 #include "rn50_reg_safe.h" 44 45 /* Firmware Names */ 46 #define FIRMWARE_R100 "radeon/R100_cp.bin" 47 #define FIRMWARE_R200 "radeon/R200_cp.bin" 48 #define FIRMWARE_R300 "radeon/R300_cp.bin" 49 #define FIRMWARE_R420 "radeon/R420_cp.bin" 50 #define FIRMWARE_RS690 "radeon/RS690_cp.bin" 51 #define FIRMWARE_RS600 "radeon/RS600_cp.bin" 52 #define FIRMWARE_R520 "radeon/R520_cp.bin" 53 54 MODULE_FIRMWARE(FIRMWARE_R100); 55 MODULE_FIRMWARE(FIRMWARE_R200); 56 MODULE_FIRMWARE(FIRMWARE_R300); 57 MODULE_FIRMWARE(FIRMWARE_R420); 58 MODULE_FIRMWARE(FIRMWARE_RS690); 59 MODULE_FIRMWARE(FIRMWARE_RS600); 60 MODULE_FIRMWARE(FIRMWARE_R520); 61 62 #include "r100_track.h" 63 64 /* This files gather functions specifics to: 65 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 66 */ 67 68 /* hpd for digital panel detect/disconnect */ 69 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 70 { 71 bool connected = false; 72 73 switch (hpd) { 74 case RADEON_HPD_1: 75 if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE) 76 connected = true; 77 break; 78 case RADEON_HPD_2: 79 if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE) 80 connected = true; 81 break; 82 default: 83 break; 84 } 85 return connected; 86 } 87 88 void r100_hpd_set_polarity(struct radeon_device *rdev, 89 enum radeon_hpd_id hpd) 90 { 91 u32 tmp; 92 bool connected = r100_hpd_sense(rdev, hpd); 93 94 switch (hpd) { 95 case RADEON_HPD_1: 96 tmp = RREG32(RADEON_FP_GEN_CNTL); 97 if (connected) 98 tmp &= ~RADEON_FP_DETECT_INT_POL; 99 else 100 tmp |= RADEON_FP_DETECT_INT_POL; 101 WREG32(RADEON_FP_GEN_CNTL, tmp); 102 break; 103 case RADEON_HPD_2: 104 tmp = RREG32(RADEON_FP2_GEN_CNTL); 105 if (connected) 106 tmp &= ~RADEON_FP2_DETECT_INT_POL; 107 else 108 tmp |= RADEON_FP2_DETECT_INT_POL; 109 WREG32(RADEON_FP2_GEN_CNTL, tmp); 110 break; 111 default: 112 break; 113 } 114 } 115 116 void r100_hpd_init(struct radeon_device *rdev) 117 { 118 struct drm_device *dev = rdev->ddev; 119 struct drm_connector *connector; 120 121 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 122 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 123 switch (radeon_connector->hpd.hpd) { 124 case RADEON_HPD_1: 125 rdev->irq.hpd[0] = true; 126 break; 127 case RADEON_HPD_2: 128 rdev->irq.hpd[1] = true; 129 break; 130 default: 131 break; 132 } 133 } 134 if (rdev->irq.installed) 135 r100_irq_set(rdev); 136 } 137 138 void r100_hpd_fini(struct radeon_device *rdev) 139 { 140 struct drm_device *dev = rdev->ddev; 141 struct drm_connector *connector; 142 143 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 144 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 145 switch (radeon_connector->hpd.hpd) { 146 case RADEON_HPD_1: 147 rdev->irq.hpd[0] = false; 148 break; 149 case RADEON_HPD_2: 150 rdev->irq.hpd[1] = false; 151 break; 152 default: 153 break; 154 } 155 } 156 } 157 158 /* 159 * PCI GART 160 */ 161 void r100_pci_gart_tlb_flush(struct radeon_device *rdev) 162 { 163 /* TODO: can we do somethings here ? */ 164 /* It seems hw only cache one entry so we should discard this 165 * entry otherwise if first GPU GART read hit this entry it 166 * could end up in wrong address. */ 167 } 168 169 int r100_pci_gart_init(struct radeon_device *rdev) 170 { 171 int r; 172 173 if (rdev->gart.table.ram.ptr) { 174 WARN(1, "R100 PCI GART already initialized.\n"); 175 return 0; 176 } 177 /* Initialize common gart structure */ 178 r = radeon_gart_init(rdev); 179 if (r) 180 return r; 181 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; 182 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; 183 rdev->asic->gart_set_page = &r100_pci_gart_set_page; 184 return radeon_gart_table_ram_alloc(rdev); 185 } 186 187 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ 188 void r100_enable_bm(struct radeon_device *rdev) 189 { 190 uint32_t tmp; 191 /* Enable bus mastering */ 192 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; 193 WREG32(RADEON_BUS_CNTL, tmp); 194 } 195 196 int r100_pci_gart_enable(struct radeon_device *rdev) 197 { 198 uint32_t tmp; 199 200 /* discard memory request outside of configured range */ 201 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; 202 WREG32(RADEON_AIC_CNTL, tmp); 203 /* set address range for PCI address translate */ 204 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location); 205 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 206 WREG32(RADEON_AIC_HI_ADDR, tmp); 207 /* set PCI GART page-table base address */ 208 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); 209 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; 210 WREG32(RADEON_AIC_CNTL, tmp); 211 r100_pci_gart_tlb_flush(rdev); 212 rdev->gart.ready = true; 213 return 0; 214 } 215 216 void r100_pci_gart_disable(struct radeon_device *rdev) 217 { 218 uint32_t tmp; 219 220 /* discard memory request outside of configured range */ 221 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; 222 WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN); 223 WREG32(RADEON_AIC_LO_ADDR, 0); 224 WREG32(RADEON_AIC_HI_ADDR, 0); 225 } 226 227 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 228 { 229 if (i < 0 || i > rdev->gart.num_gpu_pages) { 230 return -EINVAL; 231 } 232 rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr)); 233 return 0; 234 } 235 236 void r100_pci_gart_fini(struct radeon_device *rdev) 237 { 238 r100_pci_gart_disable(rdev); 239 radeon_gart_table_ram_free(rdev); 240 radeon_gart_fini(rdev); 241 } 242 243 int r100_irq_set(struct radeon_device *rdev) 244 { 245 uint32_t tmp = 0; 246 247 if (!rdev->irq.installed) { 248 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 249 WREG32(R_000040_GEN_INT_CNTL, 0); 250 return -EINVAL; 251 } 252 if (rdev->irq.sw_int) { 253 tmp |= RADEON_SW_INT_ENABLE; 254 } 255 if (rdev->irq.crtc_vblank_int[0]) { 256 tmp |= RADEON_CRTC_VBLANK_MASK; 257 } 258 if (rdev->irq.crtc_vblank_int[1]) { 259 tmp |= RADEON_CRTC2_VBLANK_MASK; 260 } 261 if (rdev->irq.hpd[0]) { 262 tmp |= RADEON_FP_DETECT_MASK; 263 } 264 if (rdev->irq.hpd[1]) { 265 tmp |= RADEON_FP2_DETECT_MASK; 266 } 267 WREG32(RADEON_GEN_INT_CNTL, tmp); 268 return 0; 269 } 270 271 void r100_irq_disable(struct radeon_device *rdev) 272 { 273 u32 tmp; 274 275 WREG32(R_000040_GEN_INT_CNTL, 0); 276 /* Wait and acknowledge irq */ 277 mdelay(1); 278 tmp = RREG32(R_000044_GEN_INT_STATUS); 279 WREG32(R_000044_GEN_INT_STATUS, tmp); 280 } 281 282 static inline uint32_t r100_irq_ack(struct radeon_device *rdev) 283 { 284 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); 285 uint32_t irq_mask = RADEON_SW_INT_TEST | 286 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | 287 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; 288 289 if (irqs) { 290 WREG32(RADEON_GEN_INT_STATUS, irqs); 291 } 292 return irqs & irq_mask; 293 } 294 295 int r100_irq_process(struct radeon_device *rdev) 296 { 297 uint32_t status, msi_rearm; 298 bool queue_hotplug = false; 299 300 status = r100_irq_ack(rdev); 301 if (!status) { 302 return IRQ_NONE; 303 } 304 if (rdev->shutdown) { 305 return IRQ_NONE; 306 } 307 while (status) { 308 /* SW interrupt */ 309 if (status & RADEON_SW_INT_TEST) { 310 radeon_fence_process(rdev); 311 } 312 /* Vertical blank interrupts */ 313 if (status & RADEON_CRTC_VBLANK_STAT) { 314 drm_handle_vblank(rdev->ddev, 0); 315 } 316 if (status & RADEON_CRTC2_VBLANK_STAT) { 317 drm_handle_vblank(rdev->ddev, 1); 318 } 319 if (status & RADEON_FP_DETECT_STAT) { 320 queue_hotplug = true; 321 DRM_DEBUG("HPD1\n"); 322 } 323 if (status & RADEON_FP2_DETECT_STAT) { 324 queue_hotplug = true; 325 DRM_DEBUG("HPD2\n"); 326 } 327 status = r100_irq_ack(rdev); 328 } 329 if (queue_hotplug) 330 queue_work(rdev->wq, &rdev->hotplug_work); 331 if (rdev->msi_enabled) { 332 switch (rdev->family) { 333 case CHIP_RS400: 334 case CHIP_RS480: 335 msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM; 336 WREG32(RADEON_AIC_CNTL, msi_rearm); 337 WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM); 338 break; 339 default: 340 msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN; 341 WREG32(RADEON_MSI_REARM_EN, msi_rearm); 342 WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN); 343 break; 344 } 345 } 346 return IRQ_HANDLED; 347 } 348 349 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) 350 { 351 if (crtc == 0) 352 return RREG32(RADEON_CRTC_CRNT_FRAME); 353 else 354 return RREG32(RADEON_CRTC2_CRNT_FRAME); 355 } 356 357 /* Who ever call radeon_fence_emit should call ring_lock and ask 358 * for enough space (today caller are ib schedule and buffer move) */ 359 void r100_fence_ring_emit(struct radeon_device *rdev, 360 struct radeon_fence *fence) 361 { 362 /* We have to make sure that caches are flushed before 363 * CPU might read something from VRAM. */ 364 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); 365 radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL); 366 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); 367 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); 368 /* Wait until IDLE & CLEAN */ 369 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 370 radeon_ring_write(rdev, (1 << 16) | (1 << 17)); 371 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 372 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl | 373 RADEON_HDP_READ_BUFFER_INVALIDATE); 374 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 375 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl); 376 /* Emit fence sequence & fire IRQ */ 377 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); 378 radeon_ring_write(rdev, fence->seq); 379 radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0)); 380 radeon_ring_write(rdev, RADEON_SW_INT_FIRE); 381 } 382 383 int r100_wb_init(struct radeon_device *rdev) 384 { 385 int r; 386 387 if (rdev->wb.wb_obj == NULL) { 388 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, 389 RADEON_GEM_DOMAIN_GTT, 390 &rdev->wb.wb_obj); 391 if (r) { 392 dev_err(rdev->dev, "(%d) create WB buffer failed\n", r); 393 return r; 394 } 395 r = radeon_bo_reserve(rdev->wb.wb_obj, false); 396 if (unlikely(r != 0)) 397 return r; 398 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, 399 &rdev->wb.gpu_addr); 400 if (r) { 401 dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r); 402 radeon_bo_unreserve(rdev->wb.wb_obj); 403 return r; 404 } 405 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 406 radeon_bo_unreserve(rdev->wb.wb_obj); 407 if (r) { 408 dev_err(rdev->dev, "(%d) map WB buffer failed\n", r); 409 return r; 410 } 411 } 412 WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr); 413 WREG32(R_00070C_CP_RB_RPTR_ADDR, 414 S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2)); 415 WREG32(R_000770_SCRATCH_UMSK, 0xff); 416 return 0; 417 } 418 419 void r100_wb_disable(struct radeon_device *rdev) 420 { 421 WREG32(R_000770_SCRATCH_UMSK, 0); 422 } 423 424 void r100_wb_fini(struct radeon_device *rdev) 425 { 426 int r; 427 428 r100_wb_disable(rdev); 429 if (rdev->wb.wb_obj) { 430 r = radeon_bo_reserve(rdev->wb.wb_obj, false); 431 if (unlikely(r != 0)) { 432 dev_err(rdev->dev, "(%d) can't finish WB\n", r); 433 return; 434 } 435 radeon_bo_kunmap(rdev->wb.wb_obj); 436 radeon_bo_unpin(rdev->wb.wb_obj); 437 radeon_bo_unreserve(rdev->wb.wb_obj); 438 radeon_bo_unref(&rdev->wb.wb_obj); 439 rdev->wb.wb = NULL; 440 rdev->wb.wb_obj = NULL; 441 } 442 } 443 444 int r100_copy_blit(struct radeon_device *rdev, 445 uint64_t src_offset, 446 uint64_t dst_offset, 447 unsigned num_pages, 448 struct radeon_fence *fence) 449 { 450 uint32_t cur_pages; 451 uint32_t stride_bytes = PAGE_SIZE; 452 uint32_t pitch; 453 uint32_t stride_pixels; 454 unsigned ndw; 455 int num_loops; 456 int r = 0; 457 458 /* radeon limited to 16k stride */ 459 stride_bytes &= 0x3fff; 460 /* radeon pitch is /64 */ 461 pitch = stride_bytes / 64; 462 stride_pixels = stride_bytes / 4; 463 num_loops = DIV_ROUND_UP(num_pages, 8191); 464 465 /* Ask for enough room for blit + flush + fence */ 466 ndw = 64 + (10 * num_loops); 467 r = radeon_ring_lock(rdev, ndw); 468 if (r) { 469 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); 470 return -EINVAL; 471 } 472 while (num_pages > 0) { 473 cur_pages = num_pages; 474 if (cur_pages > 8191) { 475 cur_pages = 8191; 476 } 477 num_pages -= cur_pages; 478 479 /* pages are in Y direction - height 480 page width in X direction - width */ 481 radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8)); 482 radeon_ring_write(rdev, 483 RADEON_GMC_SRC_PITCH_OFFSET_CNTL | 484 RADEON_GMC_DST_PITCH_OFFSET_CNTL | 485 RADEON_GMC_SRC_CLIPPING | 486 RADEON_GMC_DST_CLIPPING | 487 RADEON_GMC_BRUSH_NONE | 488 (RADEON_COLOR_FORMAT_ARGB8888 << 8) | 489 RADEON_GMC_SRC_DATATYPE_COLOR | 490 RADEON_ROP3_S | 491 RADEON_DP_SRC_SOURCE_MEMORY | 492 RADEON_GMC_CLR_CMP_CNTL_DIS | 493 RADEON_GMC_WR_MSK_DIS); 494 radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10)); 495 radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10)); 496 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 497 radeon_ring_write(rdev, 0); 498 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 499 radeon_ring_write(rdev, num_pages); 500 radeon_ring_write(rdev, num_pages); 501 radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); 502 } 503 radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); 504 radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL); 505 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); 506 radeon_ring_write(rdev, 507 RADEON_WAIT_2D_IDLECLEAN | 508 RADEON_WAIT_HOST_IDLECLEAN | 509 RADEON_WAIT_DMA_GUI_IDLE); 510 if (fence) { 511 r = radeon_fence_emit(rdev, fence); 512 } 513 radeon_ring_unlock_commit(rdev); 514 return r; 515 } 516 517 static int r100_cp_wait_for_idle(struct radeon_device *rdev) 518 { 519 unsigned i; 520 u32 tmp; 521 522 for (i = 0; i < rdev->usec_timeout; i++) { 523 tmp = RREG32(R_000E40_RBBM_STATUS); 524 if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) { 525 return 0; 526 } 527 udelay(1); 528 } 529 return -1; 530 } 531 532 void r100_ring_start(struct radeon_device *rdev) 533 { 534 int r; 535 536 r = radeon_ring_lock(rdev, 2); 537 if (r) { 538 return; 539 } 540 radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0)); 541 radeon_ring_write(rdev, 542 RADEON_ISYNC_ANY2D_IDLE3D | 543 RADEON_ISYNC_ANY3D_IDLE2D | 544 RADEON_ISYNC_WAIT_IDLEGUI | 545 RADEON_ISYNC_CPSCRATCH_IDLEGUI); 546 radeon_ring_unlock_commit(rdev); 547 } 548 549 550 /* Load the microcode for the CP */ 551 static int r100_cp_init_microcode(struct radeon_device *rdev) 552 { 553 struct platform_device *pdev; 554 const char *fw_name = NULL; 555 int err; 556 557 DRM_DEBUG("\n"); 558 559 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); 560 err = IS_ERR(pdev); 561 if (err) { 562 printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); 563 return -EINVAL; 564 } 565 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) || 566 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) || 567 (rdev->family == CHIP_RS200)) { 568 DRM_INFO("Loading R100 Microcode\n"); 569 fw_name = FIRMWARE_R100; 570 } else if ((rdev->family == CHIP_R200) || 571 (rdev->family == CHIP_RV250) || 572 (rdev->family == CHIP_RV280) || 573 (rdev->family == CHIP_RS300)) { 574 DRM_INFO("Loading R200 Microcode\n"); 575 fw_name = FIRMWARE_R200; 576 } else if ((rdev->family == CHIP_R300) || 577 (rdev->family == CHIP_R350) || 578 (rdev->family == CHIP_RV350) || 579 (rdev->family == CHIP_RV380) || 580 (rdev->family == CHIP_RS400) || 581 (rdev->family == CHIP_RS480)) { 582 DRM_INFO("Loading R300 Microcode\n"); 583 fw_name = FIRMWARE_R300; 584 } else if ((rdev->family == CHIP_R420) || 585 (rdev->family == CHIP_R423) || 586 (rdev->family == CHIP_RV410)) { 587 DRM_INFO("Loading R400 Microcode\n"); 588 fw_name = FIRMWARE_R420; 589 } else if ((rdev->family == CHIP_RS690) || 590 (rdev->family == CHIP_RS740)) { 591 DRM_INFO("Loading RS690/RS740 Microcode\n"); 592 fw_name = FIRMWARE_RS690; 593 } else if (rdev->family == CHIP_RS600) { 594 DRM_INFO("Loading RS600 Microcode\n"); 595 fw_name = FIRMWARE_RS600; 596 } else if ((rdev->family == CHIP_RV515) || 597 (rdev->family == CHIP_R520) || 598 (rdev->family == CHIP_RV530) || 599 (rdev->family == CHIP_R580) || 600 (rdev->family == CHIP_RV560) || 601 (rdev->family == CHIP_RV570)) { 602 DRM_INFO("Loading R500 Microcode\n"); 603 fw_name = FIRMWARE_R520; 604 } 605 606 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); 607 platform_device_unregister(pdev); 608 if (err) { 609 printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n", 610 fw_name); 611 } else if (rdev->me_fw->size % 8) { 612 printk(KERN_ERR 613 "radeon_cp: Bogus length %zu in firmware \"%s\"\n", 614 rdev->me_fw->size, fw_name); 615 err = -EINVAL; 616 release_firmware(rdev->me_fw); 617 rdev->me_fw = NULL; 618 } 619 return err; 620 } 621 622 static void r100_cp_load_microcode(struct radeon_device *rdev) 623 { 624 const __be32 *fw_data; 625 int i, size; 626 627 if (r100_gui_wait_for_idle(rdev)) { 628 printk(KERN_WARNING "Failed to wait GUI idle while " 629 "programming pipes. Bad things might happen.\n"); 630 } 631 632 if (rdev->me_fw) { 633 size = rdev->me_fw->size / 4; 634 fw_data = (const __be32 *)&rdev->me_fw->data[0]; 635 WREG32(RADEON_CP_ME_RAM_ADDR, 0); 636 for (i = 0; i < size; i += 2) { 637 WREG32(RADEON_CP_ME_RAM_DATAH, 638 be32_to_cpup(&fw_data[i])); 639 WREG32(RADEON_CP_ME_RAM_DATAL, 640 be32_to_cpup(&fw_data[i + 1])); 641 } 642 } 643 } 644 645 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) 646 { 647 unsigned rb_bufsz; 648 unsigned rb_blksz; 649 unsigned max_fetch; 650 unsigned pre_write_timer; 651 unsigned pre_write_limit; 652 unsigned indirect2_start; 653 unsigned indirect1_start; 654 uint32_t tmp; 655 int r; 656 657 if (r100_debugfs_cp_init(rdev)) { 658 DRM_ERROR("Failed to register debugfs file for CP !\n"); 659 } 660 /* Reset CP */ 661 tmp = RREG32(RADEON_CP_CSQ_STAT); 662 if ((tmp & (1 << 31))) { 663 DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp); 664 WREG32(RADEON_CP_CSQ_MODE, 0); 665 WREG32(RADEON_CP_CSQ_CNTL, 0); 666 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP); 667 tmp = RREG32(RADEON_RBBM_SOFT_RESET); 668 mdelay(2); 669 WREG32(RADEON_RBBM_SOFT_RESET, 0); 670 tmp = RREG32(RADEON_RBBM_SOFT_RESET); 671 mdelay(2); 672 tmp = RREG32(RADEON_CP_CSQ_STAT); 673 if ((tmp & (1 << 31))) { 674 DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp); 675 } 676 } else { 677 DRM_INFO("radeon: cp idle (0x%08X)\n", tmp); 678 } 679 680 if (!rdev->me_fw) { 681 r = r100_cp_init_microcode(rdev); 682 if (r) { 683 DRM_ERROR("Failed to load firmware!\n"); 684 return r; 685 } 686 } 687 688 /* Align ring size */ 689 rb_bufsz = drm_order(ring_size / 8); 690 ring_size = (1 << (rb_bufsz + 1)) * 4; 691 r100_cp_load_microcode(rdev); 692 r = radeon_ring_init(rdev, ring_size); 693 if (r) { 694 return r; 695 } 696 /* Each time the cp read 1024 bytes (16 dword/quadword) update 697 * the rptr copy in system ram */ 698 rb_blksz = 9; 699 /* cp will read 128bytes at a time (4 dwords) */ 700 max_fetch = 1; 701 rdev->cp.align_mask = 16 - 1; 702 /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */ 703 pre_write_timer = 64; 704 /* Force CP_RB_WPTR write if written more than one time before the 705 * delay expire 706 */ 707 pre_write_limit = 0; 708 /* Setup the cp cache like this (cache size is 96 dwords) : 709 * RING 0 to 15 710 * INDIRECT1 16 to 79 711 * INDIRECT2 80 to 95 712 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords)) 713 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords)) 714 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords)) 715 * Idea being that most of the gpu cmd will be through indirect1 buffer 716 * so it gets the bigger cache. 717 */ 718 indirect2_start = 80; 719 indirect1_start = 16; 720 /* cp setup */ 721 WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); 722 tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | 723 REG_SET(RADEON_RB_BLKSZ, rb_blksz) | 724 REG_SET(RADEON_MAX_FETCH, max_fetch) | 725 RADEON_RB_NO_UPDATE); 726 #ifdef __BIG_ENDIAN 727 tmp |= RADEON_BUF_SWAP_32BIT; 728 #endif 729 WREG32(RADEON_CP_RB_CNTL, tmp); 730 731 /* Set ring address */ 732 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr); 733 WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr); 734 /* Force read & write ptr to 0 */ 735 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); 736 WREG32(RADEON_CP_RB_RPTR_WR, 0); 737 WREG32(RADEON_CP_RB_WPTR, 0); 738 WREG32(RADEON_CP_RB_CNTL, tmp); 739 udelay(10); 740 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); 741 rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); 742 /* Set cp mode to bus mastering & enable cp*/ 743 WREG32(RADEON_CP_CSQ_MODE, 744 REG_SET(RADEON_INDIRECT2_START, indirect2_start) | 745 REG_SET(RADEON_INDIRECT1_START, indirect1_start)); 746 WREG32(0x718, 0); 747 WREG32(0x744, 0x00004D4D); 748 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); 749 radeon_ring_start(rdev); 750 r = radeon_ring_test(rdev); 751 if (r) { 752 DRM_ERROR("radeon: cp isn't working (%d).\n", r); 753 return r; 754 } 755 rdev->cp.ready = true; 756 return 0; 757 } 758 759 void r100_cp_fini(struct radeon_device *rdev) 760 { 761 if (r100_cp_wait_for_idle(rdev)) { 762 DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n"); 763 } 764 /* Disable ring */ 765 r100_cp_disable(rdev); 766 radeon_ring_fini(rdev); 767 DRM_INFO("radeon: cp finalized\n"); 768 } 769 770 void r100_cp_disable(struct radeon_device *rdev) 771 { 772 /* Disable ring */ 773 rdev->cp.ready = false; 774 WREG32(RADEON_CP_CSQ_MODE, 0); 775 WREG32(RADEON_CP_CSQ_CNTL, 0); 776 if (r100_gui_wait_for_idle(rdev)) { 777 printk(KERN_WARNING "Failed to wait GUI idle while " 778 "programming pipes. Bad things might happen.\n"); 779 } 780 } 781 782 int r100_cp_reset(struct radeon_device *rdev) 783 { 784 uint32_t tmp; 785 bool reinit_cp; 786 int i; 787 788 reinit_cp = rdev->cp.ready; 789 rdev->cp.ready = false; 790 WREG32(RADEON_CP_CSQ_MODE, 0); 791 WREG32(RADEON_CP_CSQ_CNTL, 0); 792 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP); 793 (void)RREG32(RADEON_RBBM_SOFT_RESET); 794 udelay(200); 795 WREG32(RADEON_RBBM_SOFT_RESET, 0); 796 /* Wait to prevent race in RBBM_STATUS */ 797 mdelay(1); 798 for (i = 0; i < rdev->usec_timeout; i++) { 799 tmp = RREG32(RADEON_RBBM_STATUS); 800 if (!(tmp & (1 << 16))) { 801 DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n", 802 tmp); 803 if (reinit_cp) { 804 return r100_cp_init(rdev, rdev->cp.ring_size); 805 } 806 return 0; 807 } 808 DRM_UDELAY(1); 809 } 810 tmp = RREG32(RADEON_RBBM_STATUS); 811 DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp); 812 return -1; 813 } 814 815 void r100_cp_commit(struct radeon_device *rdev) 816 { 817 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); 818 (void)RREG32(RADEON_CP_RB_WPTR); 819 } 820 821 822 /* 823 * CS functions 824 */ 825 int r100_cs_parse_packet0(struct radeon_cs_parser *p, 826 struct radeon_cs_packet *pkt, 827 const unsigned *auth, unsigned n, 828 radeon_packet0_check_t check) 829 { 830 unsigned reg; 831 unsigned i, j, m; 832 unsigned idx; 833 int r; 834 835 idx = pkt->idx + 1; 836 reg = pkt->reg; 837 /* Check that register fall into register range 838 * determined by the number of entry (n) in the 839 * safe register bitmap. 840 */ 841 if (pkt->one_reg_wr) { 842 if ((reg >> 7) > n) { 843 return -EINVAL; 844 } 845 } else { 846 if (((reg + (pkt->count << 2)) >> 7) > n) { 847 return -EINVAL; 848 } 849 } 850 for (i = 0; i <= pkt->count; i++, idx++) { 851 j = (reg >> 7); 852 m = 1 << ((reg >> 2) & 31); 853 if (auth[j] & m) { 854 r = check(p, pkt, idx, reg); 855 if (r) { 856 return r; 857 } 858 } 859 if (pkt->one_reg_wr) { 860 if (!(auth[j] & m)) { 861 break; 862 } 863 } else { 864 reg += 4; 865 } 866 } 867 return 0; 868 } 869 870 void r100_cs_dump_packet(struct radeon_cs_parser *p, 871 struct radeon_cs_packet *pkt) 872 { 873 volatile uint32_t *ib; 874 unsigned i; 875 unsigned idx; 876 877 ib = p->ib->ptr; 878 idx = pkt->idx; 879 for (i = 0; i <= (pkt->count + 1); i++, idx++) { 880 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]); 881 } 882 } 883 884 /** 885 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet 886 * @parser: parser structure holding parsing context. 887 * @pkt: where to store packet informations 888 * 889 * Assume that chunk_ib_index is properly set. Will return -EINVAL 890 * if packet is bigger than remaining ib size. or if packets is unknown. 891 **/ 892 int r100_cs_packet_parse(struct radeon_cs_parser *p, 893 struct radeon_cs_packet *pkt, 894 unsigned idx) 895 { 896 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 897 uint32_t header; 898 899 if (idx >= ib_chunk->length_dw) { 900 DRM_ERROR("Can not parse packet at %d after CS end %d !\n", 901 idx, ib_chunk->length_dw); 902 return -EINVAL; 903 } 904 header = radeon_get_ib_value(p, idx); 905 pkt->idx = idx; 906 pkt->type = CP_PACKET_GET_TYPE(header); 907 pkt->count = CP_PACKET_GET_COUNT(header); 908 switch (pkt->type) { 909 case PACKET_TYPE0: 910 pkt->reg = CP_PACKET0_GET_REG(header); 911 pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header); 912 break; 913 case PACKET_TYPE3: 914 pkt->opcode = CP_PACKET3_GET_OPCODE(header); 915 break; 916 case PACKET_TYPE2: 917 pkt->count = -1; 918 break; 919 default: 920 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); 921 return -EINVAL; 922 } 923 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { 924 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", 925 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); 926 return -EINVAL; 927 } 928 return 0; 929 } 930 931 /** 932 * r100_cs_packet_next_vline() - parse userspace VLINE packet 933 * @parser: parser structure holding parsing context. 934 * 935 * Userspace sends a special sequence for VLINE waits. 936 * PACKET0 - VLINE_START_END + value 937 * PACKET0 - WAIT_UNTIL +_value 938 * RELOC (P3) - crtc_id in reloc. 939 * 940 * This function parses this and relocates the VLINE START END 941 * and WAIT UNTIL packets to the correct crtc. 942 * It also detects a switched off crtc and nulls out the 943 * wait in that case. 944 */ 945 int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) 946 { 947 struct drm_mode_object *obj; 948 struct drm_crtc *crtc; 949 struct radeon_crtc *radeon_crtc; 950 struct radeon_cs_packet p3reloc, waitreloc; 951 int crtc_id; 952 int r; 953 uint32_t header, h_idx, reg; 954 volatile uint32_t *ib; 955 956 ib = p->ib->ptr; 957 958 /* parse the wait until */ 959 r = r100_cs_packet_parse(p, &waitreloc, p->idx); 960 if (r) 961 return r; 962 963 /* check its a wait until and only 1 count */ 964 if (waitreloc.reg != RADEON_WAIT_UNTIL || 965 waitreloc.count != 0) { 966 DRM_ERROR("vline wait had illegal wait until segment\n"); 967 r = -EINVAL; 968 return r; 969 } 970 971 if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) { 972 DRM_ERROR("vline wait had illegal wait until\n"); 973 r = -EINVAL; 974 return r; 975 } 976 977 /* jump over the NOP */ 978 r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2); 979 if (r) 980 return r; 981 982 h_idx = p->idx - 2; 983 p->idx += waitreloc.count + 2; 984 p->idx += p3reloc.count + 2; 985 986 header = radeon_get_ib_value(p, h_idx); 987 crtc_id = radeon_get_ib_value(p, h_idx + 5); 988 reg = CP_PACKET0_GET_REG(header); 989 mutex_lock(&p->rdev->ddev->mode_config.mutex); 990 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 991 if (!obj) { 992 DRM_ERROR("cannot find crtc %d\n", crtc_id); 993 r = -EINVAL; 994 goto out; 995 } 996 crtc = obj_to_crtc(obj); 997 radeon_crtc = to_radeon_crtc(crtc); 998 crtc_id = radeon_crtc->crtc_id; 999 1000 if (!crtc->enabled) { 1001 /* if the CRTC isn't enabled - we need to nop out the wait until */ 1002 ib[h_idx + 2] = PACKET2(0); 1003 ib[h_idx + 3] = PACKET2(0); 1004 } else if (crtc_id == 1) { 1005 switch (reg) { 1006 case AVIVO_D1MODE_VLINE_START_END: 1007 header &= ~R300_CP_PACKET0_REG_MASK; 1008 header |= AVIVO_D2MODE_VLINE_START_END >> 2; 1009 break; 1010 case RADEON_CRTC_GUI_TRIG_VLINE: 1011 header &= ~R300_CP_PACKET0_REG_MASK; 1012 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; 1013 break; 1014 default: 1015 DRM_ERROR("unknown crtc reloc\n"); 1016 r = -EINVAL; 1017 goto out; 1018 } 1019 ib[h_idx] = header; 1020 ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; 1021 } 1022 out: 1023 mutex_unlock(&p->rdev->ddev->mode_config.mutex); 1024 return r; 1025 } 1026 1027 /** 1028 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3 1029 * @parser: parser structure holding parsing context. 1030 * @data: pointer to relocation data 1031 * @offset_start: starting offset 1032 * @offset_mask: offset mask (to align start offset on) 1033 * @reloc: reloc informations 1034 * 1035 * Check next packet is relocation packet3, do bo validation and compute 1036 * GPU offset using the provided start. 1037 **/ 1038 int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, 1039 struct radeon_cs_reloc **cs_reloc) 1040 { 1041 struct radeon_cs_chunk *relocs_chunk; 1042 struct radeon_cs_packet p3reloc; 1043 unsigned idx; 1044 int r; 1045 1046 if (p->chunk_relocs_idx == -1) { 1047 DRM_ERROR("No relocation chunk !\n"); 1048 return -EINVAL; 1049 } 1050 *cs_reloc = NULL; 1051 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 1052 r = r100_cs_packet_parse(p, &p3reloc, p->idx); 1053 if (r) { 1054 return r; 1055 } 1056 p->idx += p3reloc.count + 2; 1057 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 1058 DRM_ERROR("No packet3 for relocation for packet at %d.\n", 1059 p3reloc.idx); 1060 r100_cs_dump_packet(p, &p3reloc); 1061 return -EINVAL; 1062 } 1063 idx = radeon_get_ib_value(p, p3reloc.idx + 1); 1064 if (idx >= relocs_chunk->length_dw) { 1065 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 1066 idx, relocs_chunk->length_dw); 1067 r100_cs_dump_packet(p, &p3reloc); 1068 return -EINVAL; 1069 } 1070 /* FIXME: we assume reloc size is 4 dwords */ 1071 *cs_reloc = p->relocs_ptr[(idx / 4)]; 1072 return 0; 1073 } 1074 1075 static int r100_get_vtx_size(uint32_t vtx_fmt) 1076 { 1077 int vtx_size; 1078 vtx_size = 2; 1079 /* ordered according to bits in spec */ 1080 if (vtx_fmt & RADEON_SE_VTX_FMT_W0) 1081 vtx_size++; 1082 if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR) 1083 vtx_size += 3; 1084 if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA) 1085 vtx_size++; 1086 if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR) 1087 vtx_size++; 1088 if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC) 1089 vtx_size += 3; 1090 if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG) 1091 vtx_size++; 1092 if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC) 1093 vtx_size++; 1094 if (vtx_fmt & RADEON_SE_VTX_FMT_ST0) 1095 vtx_size += 2; 1096 if (vtx_fmt & RADEON_SE_VTX_FMT_ST1) 1097 vtx_size += 2; 1098 if (vtx_fmt & RADEON_SE_VTX_FMT_Q1) 1099 vtx_size++; 1100 if (vtx_fmt & RADEON_SE_VTX_FMT_ST2) 1101 vtx_size += 2; 1102 if (vtx_fmt & RADEON_SE_VTX_FMT_Q2) 1103 vtx_size++; 1104 if (vtx_fmt & RADEON_SE_VTX_FMT_ST3) 1105 vtx_size += 2; 1106 if (vtx_fmt & RADEON_SE_VTX_FMT_Q3) 1107 vtx_size++; 1108 if (vtx_fmt & RADEON_SE_VTX_FMT_Q0) 1109 vtx_size++; 1110 /* blend weight */ 1111 if (vtx_fmt & (0x7 << 15)) 1112 vtx_size += (vtx_fmt >> 15) & 0x7; 1113 if (vtx_fmt & RADEON_SE_VTX_FMT_N0) 1114 vtx_size += 3; 1115 if (vtx_fmt & RADEON_SE_VTX_FMT_XY1) 1116 vtx_size += 2; 1117 if (vtx_fmt & RADEON_SE_VTX_FMT_Z1) 1118 vtx_size++; 1119 if (vtx_fmt & RADEON_SE_VTX_FMT_W1) 1120 vtx_size++; 1121 if (vtx_fmt & RADEON_SE_VTX_FMT_N1) 1122 vtx_size++; 1123 if (vtx_fmt & RADEON_SE_VTX_FMT_Z) 1124 vtx_size++; 1125 return vtx_size; 1126 } 1127 1128 static int r100_packet0_check(struct radeon_cs_parser *p, 1129 struct radeon_cs_packet *pkt, 1130 unsigned idx, unsigned reg) 1131 { 1132 struct radeon_cs_reloc *reloc; 1133 struct r100_cs_track *track; 1134 volatile uint32_t *ib; 1135 uint32_t tmp; 1136 int r; 1137 int i, face; 1138 u32 tile_flags = 0; 1139 u32 idx_value; 1140 1141 ib = p->ib->ptr; 1142 track = (struct r100_cs_track *)p->track; 1143 1144 idx_value = radeon_get_ib_value(p, idx); 1145 1146 switch (reg) { 1147 case RADEON_CRTC_GUI_TRIG_VLINE: 1148 r = r100_cs_packet_parse_vline(p); 1149 if (r) { 1150 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1151 idx, reg); 1152 r100_cs_dump_packet(p, pkt); 1153 return r; 1154 } 1155 break; 1156 /* FIXME: only allow PACKET3 blit? easier to check for out of 1157 * range access */ 1158 case RADEON_DST_PITCH_OFFSET: 1159 case RADEON_SRC_PITCH_OFFSET: 1160 r = r100_reloc_pitch_offset(p, pkt, idx, reg); 1161 if (r) 1162 return r; 1163 break; 1164 case RADEON_RB3D_DEPTHOFFSET: 1165 r = r100_cs_packet_next_reloc(p, &reloc); 1166 if (r) { 1167 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1168 idx, reg); 1169 r100_cs_dump_packet(p, pkt); 1170 return r; 1171 } 1172 track->zb.robj = reloc->robj; 1173 track->zb.offset = idx_value; 1174 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1175 break; 1176 case RADEON_RB3D_COLOROFFSET: 1177 r = r100_cs_packet_next_reloc(p, &reloc); 1178 if (r) { 1179 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1180 idx, reg); 1181 r100_cs_dump_packet(p, pkt); 1182 return r; 1183 } 1184 track->cb[0].robj = reloc->robj; 1185 track->cb[0].offset = idx_value; 1186 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1187 break; 1188 case RADEON_PP_TXOFFSET_0: 1189 case RADEON_PP_TXOFFSET_1: 1190 case RADEON_PP_TXOFFSET_2: 1191 i = (reg - RADEON_PP_TXOFFSET_0) / 24; 1192 r = r100_cs_packet_next_reloc(p, &reloc); 1193 if (r) { 1194 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1195 idx, reg); 1196 r100_cs_dump_packet(p, pkt); 1197 return r; 1198 } 1199 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1200 track->textures[i].robj = reloc->robj; 1201 break; 1202 case RADEON_PP_CUBIC_OFFSET_T0_0: 1203 case RADEON_PP_CUBIC_OFFSET_T0_1: 1204 case RADEON_PP_CUBIC_OFFSET_T0_2: 1205 case RADEON_PP_CUBIC_OFFSET_T0_3: 1206 case RADEON_PP_CUBIC_OFFSET_T0_4: 1207 i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4; 1208 r = r100_cs_packet_next_reloc(p, &reloc); 1209 if (r) { 1210 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1211 idx, reg); 1212 r100_cs_dump_packet(p, pkt); 1213 return r; 1214 } 1215 track->textures[0].cube_info[i].offset = idx_value; 1216 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1217 track->textures[0].cube_info[i].robj = reloc->robj; 1218 break; 1219 case RADEON_PP_CUBIC_OFFSET_T1_0: 1220 case RADEON_PP_CUBIC_OFFSET_T1_1: 1221 case RADEON_PP_CUBIC_OFFSET_T1_2: 1222 case RADEON_PP_CUBIC_OFFSET_T1_3: 1223 case RADEON_PP_CUBIC_OFFSET_T1_4: 1224 i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4; 1225 r = r100_cs_packet_next_reloc(p, &reloc); 1226 if (r) { 1227 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1228 idx, reg); 1229 r100_cs_dump_packet(p, pkt); 1230 return r; 1231 } 1232 track->textures[1].cube_info[i].offset = idx_value; 1233 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1234 track->textures[1].cube_info[i].robj = reloc->robj; 1235 break; 1236 case RADEON_PP_CUBIC_OFFSET_T2_0: 1237 case RADEON_PP_CUBIC_OFFSET_T2_1: 1238 case RADEON_PP_CUBIC_OFFSET_T2_2: 1239 case RADEON_PP_CUBIC_OFFSET_T2_3: 1240 case RADEON_PP_CUBIC_OFFSET_T2_4: 1241 i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4; 1242 r = r100_cs_packet_next_reloc(p, &reloc); 1243 if (r) { 1244 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1245 idx, reg); 1246 r100_cs_dump_packet(p, pkt); 1247 return r; 1248 } 1249 track->textures[2].cube_info[i].offset = idx_value; 1250 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1251 track->textures[2].cube_info[i].robj = reloc->robj; 1252 break; 1253 case RADEON_RE_WIDTH_HEIGHT: 1254 track->maxy = ((idx_value >> 16) & 0x7FF); 1255 break; 1256 case RADEON_RB3D_COLORPITCH: 1257 r = r100_cs_packet_next_reloc(p, &reloc); 1258 if (r) { 1259 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1260 idx, reg); 1261 r100_cs_dump_packet(p, pkt); 1262 return r; 1263 } 1264 1265 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1266 tile_flags |= RADEON_COLOR_TILE_ENABLE; 1267 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1268 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; 1269 1270 tmp = idx_value & ~(0x7 << 16); 1271 tmp |= tile_flags; 1272 ib[idx] = tmp; 1273 1274 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; 1275 break; 1276 case RADEON_RB3D_DEPTHPITCH: 1277 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; 1278 break; 1279 case RADEON_RB3D_CNTL: 1280 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 1281 case 7: 1282 case 8: 1283 case 9: 1284 case 11: 1285 case 12: 1286 track->cb[0].cpp = 1; 1287 break; 1288 case 3: 1289 case 4: 1290 case 15: 1291 track->cb[0].cpp = 2; 1292 break; 1293 case 6: 1294 track->cb[0].cpp = 4; 1295 break; 1296 default: 1297 DRM_ERROR("Invalid color buffer format (%d) !\n", 1298 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 1299 return -EINVAL; 1300 } 1301 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); 1302 break; 1303 case RADEON_RB3D_ZSTENCILCNTL: 1304 switch (idx_value & 0xf) { 1305 case 0: 1306 track->zb.cpp = 2; 1307 break; 1308 case 2: 1309 case 3: 1310 case 4: 1311 case 5: 1312 case 9: 1313 case 11: 1314 track->zb.cpp = 4; 1315 break; 1316 default: 1317 break; 1318 } 1319 break; 1320 case RADEON_RB3D_ZPASS_ADDR: 1321 r = r100_cs_packet_next_reloc(p, &reloc); 1322 if (r) { 1323 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1324 idx, reg); 1325 r100_cs_dump_packet(p, pkt); 1326 return r; 1327 } 1328 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1329 break; 1330 case RADEON_PP_CNTL: 1331 { 1332 uint32_t temp = idx_value >> 4; 1333 for (i = 0; i < track->num_texture; i++) 1334 track->textures[i].enabled = !!(temp & (1 << i)); 1335 } 1336 break; 1337 case RADEON_SE_VF_CNTL: 1338 track->vap_vf_cntl = idx_value; 1339 break; 1340 case RADEON_SE_VTX_FMT: 1341 track->vtx_size = r100_get_vtx_size(idx_value); 1342 break; 1343 case RADEON_PP_TEX_SIZE_0: 1344 case RADEON_PP_TEX_SIZE_1: 1345 case RADEON_PP_TEX_SIZE_2: 1346 i = (reg - RADEON_PP_TEX_SIZE_0) / 8; 1347 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; 1348 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 1349 break; 1350 case RADEON_PP_TEX_PITCH_0: 1351 case RADEON_PP_TEX_PITCH_1: 1352 case RADEON_PP_TEX_PITCH_2: 1353 i = (reg - RADEON_PP_TEX_PITCH_0) / 8; 1354 track->textures[i].pitch = idx_value + 32; 1355 break; 1356 case RADEON_PP_TXFILTER_0: 1357 case RADEON_PP_TXFILTER_1: 1358 case RADEON_PP_TXFILTER_2: 1359 i = (reg - RADEON_PP_TXFILTER_0) / 24; 1360 track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK) 1361 >> RADEON_MAX_MIP_LEVEL_SHIFT); 1362 tmp = (idx_value >> 23) & 0x7; 1363 if (tmp == 2 || tmp == 6) 1364 track->textures[i].roundup_w = false; 1365 tmp = (idx_value >> 27) & 0x7; 1366 if (tmp == 2 || tmp == 6) 1367 track->textures[i].roundup_h = false; 1368 break; 1369 case RADEON_PP_TXFORMAT_0: 1370 case RADEON_PP_TXFORMAT_1: 1371 case RADEON_PP_TXFORMAT_2: 1372 i = (reg - RADEON_PP_TXFORMAT_0) / 24; 1373 if (idx_value & RADEON_TXFORMAT_NON_POWER2) { 1374 track->textures[i].use_pitch = 1; 1375 } else { 1376 track->textures[i].use_pitch = 0; 1377 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); 1378 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); 1379 } 1380 if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) 1381 track->textures[i].tex_coord_type = 2; 1382 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { 1383 case RADEON_TXFORMAT_I8: 1384 case RADEON_TXFORMAT_RGB332: 1385 case RADEON_TXFORMAT_Y8: 1386 track->textures[i].cpp = 1; 1387 break; 1388 case RADEON_TXFORMAT_AI88: 1389 case RADEON_TXFORMAT_ARGB1555: 1390 case RADEON_TXFORMAT_RGB565: 1391 case RADEON_TXFORMAT_ARGB4444: 1392 case RADEON_TXFORMAT_VYUY422: 1393 case RADEON_TXFORMAT_YVYU422: 1394 case RADEON_TXFORMAT_SHADOW16: 1395 case RADEON_TXFORMAT_LDUDV655: 1396 case RADEON_TXFORMAT_DUDV88: 1397 track->textures[i].cpp = 2; 1398 break; 1399 case RADEON_TXFORMAT_ARGB8888: 1400 case RADEON_TXFORMAT_RGBA8888: 1401 case RADEON_TXFORMAT_SHADOW32: 1402 case RADEON_TXFORMAT_LDUDUV8888: 1403 track->textures[i].cpp = 4; 1404 break; 1405 case RADEON_TXFORMAT_DXT1: 1406 track->textures[i].cpp = 1; 1407 track->textures[i].compress_format = R100_TRACK_COMP_DXT1; 1408 break; 1409 case RADEON_TXFORMAT_DXT23: 1410 case RADEON_TXFORMAT_DXT45: 1411 track->textures[i].cpp = 1; 1412 track->textures[i].compress_format = R100_TRACK_COMP_DXT35; 1413 break; 1414 } 1415 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 1416 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); 1417 break; 1418 case RADEON_PP_CUBIC_FACES_0: 1419 case RADEON_PP_CUBIC_FACES_1: 1420 case RADEON_PP_CUBIC_FACES_2: 1421 tmp = idx_value; 1422 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; 1423 for (face = 0; face < 4; face++) { 1424 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 1425 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); 1426 } 1427 break; 1428 default: 1429 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 1430 reg, idx); 1431 return -EINVAL; 1432 } 1433 return 0; 1434 } 1435 1436 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, 1437 struct radeon_cs_packet *pkt, 1438 struct radeon_bo *robj) 1439 { 1440 unsigned idx; 1441 u32 value; 1442 idx = pkt->idx + 1; 1443 value = radeon_get_ib_value(p, idx + 2); 1444 if ((value + 1) > radeon_bo_size(robj)) { 1445 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " 1446 "(need %u have %lu) !\n", 1447 value + 1, 1448 radeon_bo_size(robj)); 1449 return -EINVAL; 1450 } 1451 return 0; 1452 } 1453 1454 static int r100_packet3_check(struct radeon_cs_parser *p, 1455 struct radeon_cs_packet *pkt) 1456 { 1457 struct radeon_cs_reloc *reloc; 1458 struct r100_cs_track *track; 1459 unsigned idx; 1460 volatile uint32_t *ib; 1461 int r; 1462 1463 ib = p->ib->ptr; 1464 idx = pkt->idx + 1; 1465 track = (struct r100_cs_track *)p->track; 1466 switch (pkt->opcode) { 1467 case PACKET3_3D_LOAD_VBPNTR: 1468 r = r100_packet3_load_vbpntr(p, pkt, idx); 1469 if (r) 1470 return r; 1471 break; 1472 case PACKET3_INDX_BUFFER: 1473 r = r100_cs_packet_next_reloc(p, &reloc); 1474 if (r) { 1475 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1476 r100_cs_dump_packet(p, pkt); 1477 return r; 1478 } 1479 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset); 1480 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1481 if (r) { 1482 return r; 1483 } 1484 break; 1485 case 0x23: 1486 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ 1487 r = r100_cs_packet_next_reloc(p, &reloc); 1488 if (r) { 1489 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1490 r100_cs_dump_packet(p, pkt); 1491 return r; 1492 } 1493 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset); 1494 track->num_arrays = 1; 1495 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); 1496 1497 track->arrays[0].robj = reloc->robj; 1498 track->arrays[0].esize = track->vtx_size; 1499 1500 track->max_indx = radeon_get_ib_value(p, idx+1); 1501 1502 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3); 1503 track->immd_dwords = pkt->count - 1; 1504 r = r100_cs_track_check(p->rdev, track); 1505 if (r) 1506 return r; 1507 break; 1508 case PACKET3_3D_DRAW_IMMD: 1509 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { 1510 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1511 return -EINVAL; 1512 } 1513 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); 1514 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1515 track->immd_dwords = pkt->count - 1; 1516 r = r100_cs_track_check(p->rdev, track); 1517 if (r) 1518 return r; 1519 break; 1520 /* triggers drawing using in-packet vertex data */ 1521 case PACKET3_3D_DRAW_IMMD_2: 1522 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { 1523 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1524 return -EINVAL; 1525 } 1526 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 1527 track->immd_dwords = pkt->count; 1528 r = r100_cs_track_check(p->rdev, track); 1529 if (r) 1530 return r; 1531 break; 1532 /* triggers drawing using in-packet vertex data */ 1533 case PACKET3_3D_DRAW_VBUF_2: 1534 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 1535 r = r100_cs_track_check(p->rdev, track); 1536 if (r) 1537 return r; 1538 break; 1539 /* triggers drawing of vertex buffers setup elsewhere */ 1540 case PACKET3_3D_DRAW_INDX_2: 1541 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 1542 r = r100_cs_track_check(p->rdev, track); 1543 if (r) 1544 return r; 1545 break; 1546 /* triggers drawing using indices to vertex buffer */ 1547 case PACKET3_3D_DRAW_VBUF: 1548 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1549 r = r100_cs_track_check(p->rdev, track); 1550 if (r) 1551 return r; 1552 break; 1553 /* triggers drawing of vertex buffers setup elsewhere */ 1554 case PACKET3_3D_DRAW_INDX: 1555 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1556 r = r100_cs_track_check(p->rdev, track); 1557 if (r) 1558 return r; 1559 break; 1560 /* triggers drawing using indices to vertex buffer */ 1561 case PACKET3_NOP: 1562 break; 1563 default: 1564 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 1565 return -EINVAL; 1566 } 1567 return 0; 1568 } 1569 1570 int r100_cs_parse(struct radeon_cs_parser *p) 1571 { 1572 struct radeon_cs_packet pkt; 1573 struct r100_cs_track *track; 1574 int r; 1575 1576 track = kzalloc(sizeof(*track), GFP_KERNEL); 1577 r100_cs_track_clear(p->rdev, track); 1578 p->track = track; 1579 do { 1580 r = r100_cs_packet_parse(p, &pkt, p->idx); 1581 if (r) { 1582 return r; 1583 } 1584 p->idx += pkt.count + 2; 1585 switch (pkt.type) { 1586 case PACKET_TYPE0: 1587 if (p->rdev->family >= CHIP_R200) 1588 r = r100_cs_parse_packet0(p, &pkt, 1589 p->rdev->config.r100.reg_safe_bm, 1590 p->rdev->config.r100.reg_safe_bm_size, 1591 &r200_packet0_check); 1592 else 1593 r = r100_cs_parse_packet0(p, &pkt, 1594 p->rdev->config.r100.reg_safe_bm, 1595 p->rdev->config.r100.reg_safe_bm_size, 1596 &r100_packet0_check); 1597 break; 1598 case PACKET_TYPE2: 1599 break; 1600 case PACKET_TYPE3: 1601 r = r100_packet3_check(p, &pkt); 1602 break; 1603 default: 1604 DRM_ERROR("Unknown packet type %d !\n", 1605 pkt.type); 1606 return -EINVAL; 1607 } 1608 if (r) { 1609 return r; 1610 } 1611 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 1612 return 0; 1613 } 1614 1615 1616 /* 1617 * Global GPU functions 1618 */ 1619 void r100_errata(struct radeon_device *rdev) 1620 { 1621 rdev->pll_errata = 0; 1622 1623 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) { 1624 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS; 1625 } 1626 1627 if (rdev->family == CHIP_RV100 || 1628 rdev->family == CHIP_RS100 || 1629 rdev->family == CHIP_RS200) { 1630 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY; 1631 } 1632 } 1633 1634 /* Wait for vertical sync on primary CRTC */ 1635 void r100_gpu_wait_for_vsync(struct radeon_device *rdev) 1636 { 1637 uint32_t crtc_gen_cntl, tmp; 1638 int i; 1639 1640 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL); 1641 if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) || 1642 !(crtc_gen_cntl & RADEON_CRTC_EN)) { 1643 return; 1644 } 1645 /* Clear the CRTC_VBLANK_SAVE bit */ 1646 WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR); 1647 for (i = 0; i < rdev->usec_timeout; i++) { 1648 tmp = RREG32(RADEON_CRTC_STATUS); 1649 if (tmp & RADEON_CRTC_VBLANK_SAVE) { 1650 return; 1651 } 1652 DRM_UDELAY(1); 1653 } 1654 } 1655 1656 /* Wait for vertical sync on secondary CRTC */ 1657 void r100_gpu_wait_for_vsync2(struct radeon_device *rdev) 1658 { 1659 uint32_t crtc2_gen_cntl, tmp; 1660 int i; 1661 1662 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); 1663 if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) || 1664 !(crtc2_gen_cntl & RADEON_CRTC2_EN)) 1665 return; 1666 1667 /* Clear the CRTC_VBLANK_SAVE bit */ 1668 WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR); 1669 for (i = 0; i < rdev->usec_timeout; i++) { 1670 tmp = RREG32(RADEON_CRTC2_STATUS); 1671 if (tmp & RADEON_CRTC2_VBLANK_SAVE) { 1672 return; 1673 } 1674 DRM_UDELAY(1); 1675 } 1676 } 1677 1678 int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n) 1679 { 1680 unsigned i; 1681 uint32_t tmp; 1682 1683 for (i = 0; i < rdev->usec_timeout; i++) { 1684 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK; 1685 if (tmp >= n) { 1686 return 0; 1687 } 1688 DRM_UDELAY(1); 1689 } 1690 return -1; 1691 } 1692 1693 int r100_gui_wait_for_idle(struct radeon_device *rdev) 1694 { 1695 unsigned i; 1696 uint32_t tmp; 1697 1698 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) { 1699 printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !" 1700 " Bad things might happen.\n"); 1701 } 1702 for (i = 0; i < rdev->usec_timeout; i++) { 1703 tmp = RREG32(RADEON_RBBM_STATUS); 1704 if (!(tmp & (1 << 31))) { 1705 return 0; 1706 } 1707 DRM_UDELAY(1); 1708 } 1709 return -1; 1710 } 1711 1712 int r100_mc_wait_for_idle(struct radeon_device *rdev) 1713 { 1714 unsigned i; 1715 uint32_t tmp; 1716 1717 for (i = 0; i < rdev->usec_timeout; i++) { 1718 /* read MC_STATUS */ 1719 tmp = RREG32(0x0150); 1720 if (tmp & (1 << 2)) { 1721 return 0; 1722 } 1723 DRM_UDELAY(1); 1724 } 1725 return -1; 1726 } 1727 1728 void r100_gpu_init(struct radeon_device *rdev) 1729 { 1730 /* TODO: anythings to do here ? pipes ? */ 1731 r100_hdp_reset(rdev); 1732 } 1733 1734 void r100_hdp_reset(struct radeon_device *rdev) 1735 { 1736 uint32_t tmp; 1737 1738 tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; 1739 tmp |= (7 << 28); 1740 WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); 1741 (void)RREG32(RADEON_HOST_PATH_CNTL); 1742 udelay(200); 1743 WREG32(RADEON_RBBM_SOFT_RESET, 0); 1744 WREG32(RADEON_HOST_PATH_CNTL, tmp); 1745 (void)RREG32(RADEON_HOST_PATH_CNTL); 1746 } 1747 1748 int r100_rb2d_reset(struct radeon_device *rdev) 1749 { 1750 uint32_t tmp; 1751 int i; 1752 1753 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2); 1754 (void)RREG32(RADEON_RBBM_SOFT_RESET); 1755 udelay(200); 1756 WREG32(RADEON_RBBM_SOFT_RESET, 0); 1757 /* Wait to prevent race in RBBM_STATUS */ 1758 mdelay(1); 1759 for (i = 0; i < rdev->usec_timeout; i++) { 1760 tmp = RREG32(RADEON_RBBM_STATUS); 1761 if (!(tmp & (1 << 26))) { 1762 DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n", 1763 tmp); 1764 return 0; 1765 } 1766 DRM_UDELAY(1); 1767 } 1768 tmp = RREG32(RADEON_RBBM_STATUS); 1769 DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp); 1770 return -1; 1771 } 1772 1773 int r100_gpu_reset(struct radeon_device *rdev) 1774 { 1775 uint32_t status; 1776 1777 /* reset order likely matter */ 1778 status = RREG32(RADEON_RBBM_STATUS); 1779 /* reset HDP */ 1780 r100_hdp_reset(rdev); 1781 /* reset rb2d */ 1782 if (status & ((1 << 17) | (1 << 18) | (1 << 27))) { 1783 r100_rb2d_reset(rdev); 1784 } 1785 /* TODO: reset 3D engine */ 1786 /* reset CP */ 1787 status = RREG32(RADEON_RBBM_STATUS); 1788 if (status & (1 << 16)) { 1789 r100_cp_reset(rdev); 1790 } 1791 /* Check if GPU is idle */ 1792 status = RREG32(RADEON_RBBM_STATUS); 1793 if (status & (1 << 31)) { 1794 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); 1795 return -1; 1796 } 1797 DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); 1798 return 0; 1799 } 1800 1801 void r100_set_common_regs(struct radeon_device *rdev) 1802 { 1803 struct drm_device *dev = rdev->ddev; 1804 bool force_dac2 = false; 1805 1806 /* set these so they don't interfere with anything */ 1807 WREG32(RADEON_OV0_SCALE_CNTL, 0); 1808 WREG32(RADEON_SUBPIC_CNTL, 0); 1809 WREG32(RADEON_VIPH_CONTROL, 0); 1810 WREG32(RADEON_I2C_CNTL_1, 0); 1811 WREG32(RADEON_DVI_I2C_CNTL_1, 0); 1812 WREG32(RADEON_CAP0_TRIG_CNTL, 0); 1813 WREG32(RADEON_CAP1_TRIG_CNTL, 0); 1814 1815 /* always set up dac2 on rn50 and some rv100 as lots 1816 * of servers seem to wire it up to a VGA port but 1817 * don't report it in the bios connector 1818 * table. 1819 */ 1820 switch (dev->pdev->device) { 1821 /* RN50 */ 1822 case 0x515e: 1823 case 0x5969: 1824 force_dac2 = true; 1825 break; 1826 /* RV100*/ 1827 case 0x5159: 1828 case 0x515a: 1829 /* DELL triple head servers */ 1830 if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) && 1831 ((dev->pdev->subsystem_device == 0x016c) || 1832 (dev->pdev->subsystem_device == 0x016d) || 1833 (dev->pdev->subsystem_device == 0x016e) || 1834 (dev->pdev->subsystem_device == 0x016f) || 1835 (dev->pdev->subsystem_device == 0x0170) || 1836 (dev->pdev->subsystem_device == 0x017d) || 1837 (dev->pdev->subsystem_device == 0x017e) || 1838 (dev->pdev->subsystem_device == 0x0183) || 1839 (dev->pdev->subsystem_device == 0x018a) || 1840 (dev->pdev->subsystem_device == 0x019a))) 1841 force_dac2 = true; 1842 break; 1843 } 1844 1845 if (force_dac2) { 1846 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); 1847 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); 1848 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2); 1849 1850 /* For CRT on DAC2, don't turn it on if BIOS didn't 1851 enable it, even it's detected. 1852 */ 1853 1854 /* force it to crtc0 */ 1855 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; 1856 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; 1857 disp_hw_debug |= RADEON_CRT2_DISP1_SEL; 1858 1859 /* set up the TV DAC */ 1860 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL | 1861 RADEON_TV_DAC_STD_MASK | 1862 RADEON_TV_DAC_RDACPD | 1863 RADEON_TV_DAC_GDACPD | 1864 RADEON_TV_DAC_BDACPD | 1865 RADEON_TV_DAC_BGADJ_MASK | 1866 RADEON_TV_DAC_DACADJ_MASK); 1867 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | 1868 RADEON_TV_DAC_NHOLD | 1869 RADEON_TV_DAC_STD_PS2 | 1870 (0x58 << 16)); 1871 1872 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 1873 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 1874 WREG32(RADEON_DAC_CNTL2, dac2_cntl); 1875 } 1876 } 1877 1878 /* 1879 * VRAM info 1880 */ 1881 static void r100_vram_get_type(struct radeon_device *rdev) 1882 { 1883 uint32_t tmp; 1884 1885 rdev->mc.vram_is_ddr = false; 1886 if (rdev->flags & RADEON_IS_IGP) 1887 rdev->mc.vram_is_ddr = true; 1888 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR) 1889 rdev->mc.vram_is_ddr = true; 1890 if ((rdev->family == CHIP_RV100) || 1891 (rdev->family == CHIP_RS100) || 1892 (rdev->family == CHIP_RS200)) { 1893 tmp = RREG32(RADEON_MEM_CNTL); 1894 if (tmp & RV100_HALF_MODE) { 1895 rdev->mc.vram_width = 32; 1896 } else { 1897 rdev->mc.vram_width = 64; 1898 } 1899 if (rdev->flags & RADEON_SINGLE_CRTC) { 1900 rdev->mc.vram_width /= 4; 1901 rdev->mc.vram_is_ddr = true; 1902 } 1903 } else if (rdev->family <= CHIP_RV280) { 1904 tmp = RREG32(RADEON_MEM_CNTL); 1905 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) { 1906 rdev->mc.vram_width = 128; 1907 } else { 1908 rdev->mc.vram_width = 64; 1909 } 1910 } else { 1911 /* newer IGPs */ 1912 rdev->mc.vram_width = 128; 1913 } 1914 } 1915 1916 static u32 r100_get_accessible_vram(struct radeon_device *rdev) 1917 { 1918 u32 aper_size; 1919 u8 byte; 1920 1921 aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 1922 1923 /* Set HDP_APER_CNTL only on cards that are known not to be broken, 1924 * that is has the 2nd generation multifunction PCI interface 1925 */ 1926 if (rdev->family == CHIP_RV280 || 1927 rdev->family >= CHIP_RV350) { 1928 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, 1929 ~RADEON_HDP_APER_CNTL); 1930 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); 1931 return aper_size * 2; 1932 } 1933 1934 /* Older cards have all sorts of funny issues to deal with. First 1935 * check if it's a multifunction card by reading the PCI config 1936 * header type... Limit those to one aperture size 1937 */ 1938 pci_read_config_byte(rdev->pdev, 0xe, &byte); 1939 if (byte & 0x80) { 1940 DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); 1941 DRM_INFO("Limiting VRAM to one aperture\n"); 1942 return aper_size; 1943 } 1944 1945 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS 1946 * have set it up. We don't write this as it's broken on some ASICs but 1947 * we expect the BIOS to have done the right thing (might be too optimistic...) 1948 */ 1949 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) 1950 return aper_size * 2; 1951 return aper_size; 1952 } 1953 1954 void r100_vram_init_sizes(struct radeon_device *rdev) 1955 { 1956 u64 config_aper_size; 1957 u32 accessible; 1958 1959 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 1960 1961 if (rdev->flags & RADEON_IS_IGP) { 1962 uint32_t tom; 1963 /* read NB_TOM to get the amount of ram stolen for the GPU */ 1964 tom = RREG32(RADEON_NB_TOM); 1965 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); 1966 /* for IGPs we need to keep VRAM where it was put by the BIOS */ 1967 rdev->mc.vram_location = (tom & 0xffff) << 16; 1968 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 1969 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 1970 } else { 1971 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 1972 /* Some production boards of m6 will report 0 1973 * if it's 8 MB 1974 */ 1975 if (rdev->mc.real_vram_size == 0) { 1976 rdev->mc.real_vram_size = 8192 * 1024; 1977 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 1978 } 1979 /* let driver place VRAM */ 1980 rdev->mc.vram_location = 0xFFFFFFFFUL; 1981 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 1982 * Novell bug 204882 + along with lots of ubuntu ones */ 1983 if (config_aper_size > rdev->mc.real_vram_size) 1984 rdev->mc.mc_vram_size = config_aper_size; 1985 else 1986 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 1987 } 1988 1989 /* work out accessible VRAM */ 1990 accessible = r100_get_accessible_vram(rdev); 1991 1992 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 1993 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 1994 1995 if (accessible > rdev->mc.aper_size) 1996 accessible = rdev->mc.aper_size; 1997 1998 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) 1999 rdev->mc.mc_vram_size = rdev->mc.aper_size; 2000 2001 if (rdev->mc.real_vram_size > rdev->mc.aper_size) 2002 rdev->mc.real_vram_size = rdev->mc.aper_size; 2003 } 2004 2005 void r100_vga_set_state(struct radeon_device *rdev, bool state) 2006 { 2007 uint32_t temp; 2008 2009 temp = RREG32(RADEON_CONFIG_CNTL); 2010 if (state == false) { 2011 temp &= ~(1<<8); 2012 temp |= (1<<9); 2013 } else { 2014 temp &= ~(1<<9); 2015 } 2016 WREG32(RADEON_CONFIG_CNTL, temp); 2017 } 2018 2019 void r100_vram_info(struct radeon_device *rdev) 2020 { 2021 r100_vram_get_type(rdev); 2022 2023 r100_vram_init_sizes(rdev); 2024 } 2025 2026 2027 /* 2028 * Indirect registers accessor 2029 */ 2030 void r100_pll_errata_after_index(struct radeon_device *rdev) 2031 { 2032 if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) { 2033 return; 2034 } 2035 (void)RREG32(RADEON_CLOCK_CNTL_DATA); 2036 (void)RREG32(RADEON_CRTC_GEN_CNTL); 2037 } 2038 2039 static void r100_pll_errata_after_data(struct radeon_device *rdev) 2040 { 2041 /* This workarounds is necessary on RV100, RS100 and RS200 chips 2042 * or the chip could hang on a subsequent access 2043 */ 2044 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) { 2045 udelay(5000); 2046 } 2047 2048 /* This function is required to workaround a hardware bug in some (all?) 2049 * revisions of the R300. This workaround should be called after every 2050 * CLOCK_CNTL_INDEX register access. If not, register reads afterward 2051 * may not be correct. 2052 */ 2053 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) { 2054 uint32_t save, tmp; 2055 2056 save = RREG32(RADEON_CLOCK_CNTL_INDEX); 2057 tmp = save & ~(0x3f | RADEON_PLL_WR_EN); 2058 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp); 2059 tmp = RREG32(RADEON_CLOCK_CNTL_DATA); 2060 WREG32(RADEON_CLOCK_CNTL_INDEX, save); 2061 } 2062 } 2063 2064 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) 2065 { 2066 uint32_t data; 2067 2068 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); 2069 r100_pll_errata_after_index(rdev); 2070 data = RREG32(RADEON_CLOCK_CNTL_DATA); 2071 r100_pll_errata_after_data(rdev); 2072 return data; 2073 } 2074 2075 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2076 { 2077 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); 2078 r100_pll_errata_after_index(rdev); 2079 WREG32(RADEON_CLOCK_CNTL_DATA, v); 2080 r100_pll_errata_after_data(rdev); 2081 } 2082 2083 void r100_set_safe_registers(struct radeon_device *rdev) 2084 { 2085 if (ASIC_IS_RN50(rdev)) { 2086 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; 2087 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm); 2088 } else if (rdev->family < CHIP_R200) { 2089 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; 2090 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); 2091 } else { 2092 r200_set_safe_registers(rdev); 2093 } 2094 } 2095 2096 /* 2097 * Debugfs info 2098 */ 2099 #if defined(CONFIG_DEBUG_FS) 2100 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data) 2101 { 2102 struct drm_info_node *node = (struct drm_info_node *) m->private; 2103 struct drm_device *dev = node->minor->dev; 2104 struct radeon_device *rdev = dev->dev_private; 2105 uint32_t reg, value; 2106 unsigned i; 2107 2108 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS)); 2109 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C)); 2110 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2111 for (i = 0; i < 64; i++) { 2112 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100); 2113 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2; 2114 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i); 2115 value = RREG32(RADEON_RBBM_CMDFIFO_DATA); 2116 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value); 2117 } 2118 return 0; 2119 } 2120 2121 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) 2122 { 2123 struct drm_info_node *node = (struct drm_info_node *) m->private; 2124 struct drm_device *dev = node->minor->dev; 2125 struct radeon_device *rdev = dev->dev_private; 2126 uint32_t rdp, wdp; 2127 unsigned count, i, j; 2128 2129 radeon_ring_free_size(rdev); 2130 rdp = RREG32(RADEON_CP_RB_RPTR); 2131 wdp = RREG32(RADEON_CP_RB_WPTR); 2132 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask; 2133 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2134 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); 2135 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2136 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw); 2137 seq_printf(m, "%u dwords in ring\n", count); 2138 for (j = 0; j <= count; j++) { 2139 i = (rdp + j) & rdev->cp.ptr_mask; 2140 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]); 2141 } 2142 return 0; 2143 } 2144 2145 2146 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data) 2147 { 2148 struct drm_info_node *node = (struct drm_info_node *) m->private; 2149 struct drm_device *dev = node->minor->dev; 2150 struct radeon_device *rdev = dev->dev_private; 2151 uint32_t csq_stat, csq2_stat, tmp; 2152 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr; 2153 unsigned i; 2154 2155 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2156 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE)); 2157 csq_stat = RREG32(RADEON_CP_CSQ_STAT); 2158 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT); 2159 r_rptr = (csq_stat >> 0) & 0x3ff; 2160 r_wptr = (csq_stat >> 10) & 0x3ff; 2161 ib1_rptr = (csq_stat >> 20) & 0x3ff; 2162 ib1_wptr = (csq2_stat >> 0) & 0x3ff; 2163 ib2_rptr = (csq2_stat >> 10) & 0x3ff; 2164 ib2_wptr = (csq2_stat >> 20) & 0x3ff; 2165 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat); 2166 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat); 2167 seq_printf(m, "Ring rptr %u\n", r_rptr); 2168 seq_printf(m, "Ring wptr %u\n", r_wptr); 2169 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr); 2170 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr); 2171 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr); 2172 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr); 2173 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms 2174 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */ 2175 seq_printf(m, "Ring fifo:\n"); 2176 for (i = 0; i < 256; i++) { 2177 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 2178 tmp = RREG32(RADEON_CP_CSQ_DATA); 2179 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp); 2180 } 2181 seq_printf(m, "Indirect1 fifo:\n"); 2182 for (i = 256; i <= 512; i++) { 2183 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 2184 tmp = RREG32(RADEON_CP_CSQ_DATA); 2185 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp); 2186 } 2187 seq_printf(m, "Indirect2 fifo:\n"); 2188 for (i = 640; i < ib1_wptr; i++) { 2189 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 2190 tmp = RREG32(RADEON_CP_CSQ_DATA); 2191 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp); 2192 } 2193 return 0; 2194 } 2195 2196 static int r100_debugfs_mc_info(struct seq_file *m, void *data) 2197 { 2198 struct drm_info_node *node = (struct drm_info_node *) m->private; 2199 struct drm_device *dev = node->minor->dev; 2200 struct radeon_device *rdev = dev->dev_private; 2201 uint32_t tmp; 2202 2203 tmp = RREG32(RADEON_CONFIG_MEMSIZE); 2204 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp); 2205 tmp = RREG32(RADEON_MC_FB_LOCATION); 2206 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp); 2207 tmp = RREG32(RADEON_BUS_CNTL); 2208 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp); 2209 tmp = RREG32(RADEON_MC_AGP_LOCATION); 2210 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp); 2211 tmp = RREG32(RADEON_AGP_BASE); 2212 seq_printf(m, "AGP_BASE 0x%08x\n", tmp); 2213 tmp = RREG32(RADEON_HOST_PATH_CNTL); 2214 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp); 2215 tmp = RREG32(0x01D0); 2216 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp); 2217 tmp = RREG32(RADEON_AIC_LO_ADDR); 2218 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp); 2219 tmp = RREG32(RADEON_AIC_HI_ADDR); 2220 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp); 2221 tmp = RREG32(0x01E4); 2222 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp); 2223 return 0; 2224 } 2225 2226 static struct drm_info_list r100_debugfs_rbbm_list[] = { 2227 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL}, 2228 }; 2229 2230 static struct drm_info_list r100_debugfs_cp_list[] = { 2231 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL}, 2232 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL}, 2233 }; 2234 2235 static struct drm_info_list r100_debugfs_mc_info_list[] = { 2236 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL}, 2237 }; 2238 #endif 2239 2240 int r100_debugfs_rbbm_init(struct radeon_device *rdev) 2241 { 2242 #if defined(CONFIG_DEBUG_FS) 2243 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1); 2244 #else 2245 return 0; 2246 #endif 2247 } 2248 2249 int r100_debugfs_cp_init(struct radeon_device *rdev) 2250 { 2251 #if defined(CONFIG_DEBUG_FS) 2252 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2); 2253 #else 2254 return 0; 2255 #endif 2256 } 2257 2258 int r100_debugfs_mc_info_init(struct radeon_device *rdev) 2259 { 2260 #if defined(CONFIG_DEBUG_FS) 2261 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1); 2262 #else 2263 return 0; 2264 #endif 2265 } 2266 2267 int r100_set_surface_reg(struct radeon_device *rdev, int reg, 2268 uint32_t tiling_flags, uint32_t pitch, 2269 uint32_t offset, uint32_t obj_size) 2270 { 2271 int surf_index = reg * 16; 2272 int flags = 0; 2273 2274 /* r100/r200 divide by 16 */ 2275 if (rdev->family < CHIP_R300) 2276 flags = pitch / 16; 2277 else 2278 flags = pitch / 8; 2279 2280 if (rdev->family <= CHIP_RS200) { 2281 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 2282 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 2283 flags |= RADEON_SURF_TILE_COLOR_BOTH; 2284 if (tiling_flags & RADEON_TILING_MACRO) 2285 flags |= RADEON_SURF_TILE_COLOR_MACRO; 2286 } else if (rdev->family <= CHIP_RV280) { 2287 if (tiling_flags & (RADEON_TILING_MACRO)) 2288 flags |= R200_SURF_TILE_COLOR_MACRO; 2289 if (tiling_flags & RADEON_TILING_MICRO) 2290 flags |= R200_SURF_TILE_COLOR_MICRO; 2291 } else { 2292 if (tiling_flags & RADEON_TILING_MACRO) 2293 flags |= R300_SURF_TILE_MACRO; 2294 if (tiling_flags & RADEON_TILING_MICRO) 2295 flags |= R300_SURF_TILE_MICRO; 2296 } 2297 2298 if (tiling_flags & RADEON_TILING_SWAP_16BIT) 2299 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP; 2300 if (tiling_flags & RADEON_TILING_SWAP_32BIT) 2301 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; 2302 2303 DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); 2304 WREG32(RADEON_SURFACE0_INFO + surf_index, flags); 2305 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); 2306 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); 2307 return 0; 2308 } 2309 2310 void r100_clear_surface_reg(struct radeon_device *rdev, int reg) 2311 { 2312 int surf_index = reg * 16; 2313 WREG32(RADEON_SURFACE0_INFO + surf_index, 0); 2314 } 2315 2316 void r100_bandwidth_update(struct radeon_device *rdev) 2317 { 2318 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; 2319 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; 2320 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; 2321 uint32_t temp, data, mem_trcd, mem_trp, mem_tras; 2322 fixed20_12 memtcas_ff[8] = { 2323 fixed_init(1), 2324 fixed_init(2), 2325 fixed_init(3), 2326 fixed_init(0), 2327 fixed_init_half(1), 2328 fixed_init_half(2), 2329 fixed_init(0), 2330 }; 2331 fixed20_12 memtcas_rs480_ff[8] = { 2332 fixed_init(0), 2333 fixed_init(1), 2334 fixed_init(2), 2335 fixed_init(3), 2336 fixed_init(0), 2337 fixed_init_half(1), 2338 fixed_init_half(2), 2339 fixed_init_half(3), 2340 }; 2341 fixed20_12 memtcas2_ff[8] = { 2342 fixed_init(0), 2343 fixed_init(1), 2344 fixed_init(2), 2345 fixed_init(3), 2346 fixed_init(4), 2347 fixed_init(5), 2348 fixed_init(6), 2349 fixed_init(7), 2350 }; 2351 fixed20_12 memtrbs[8] = { 2352 fixed_init(1), 2353 fixed_init_half(1), 2354 fixed_init(2), 2355 fixed_init_half(2), 2356 fixed_init(3), 2357 fixed_init_half(3), 2358 fixed_init(4), 2359 fixed_init_half(4) 2360 }; 2361 fixed20_12 memtrbs_r4xx[8] = { 2362 fixed_init(4), 2363 fixed_init(5), 2364 fixed_init(6), 2365 fixed_init(7), 2366 fixed_init(8), 2367 fixed_init(9), 2368 fixed_init(10), 2369 fixed_init(11) 2370 }; 2371 fixed20_12 min_mem_eff; 2372 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; 2373 fixed20_12 cur_latency_mclk, cur_latency_sclk; 2374 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, 2375 disp_drain_rate2, read_return_rate; 2376 fixed20_12 time_disp1_drop_priority; 2377 int c; 2378 int cur_size = 16; /* in octawords */ 2379 int critical_point = 0, critical_point2; 2380 /* uint32_t read_return_rate, time_disp1_drop_priority; */ 2381 int stop_req, max_stop_req; 2382 struct drm_display_mode *mode1 = NULL; 2383 struct drm_display_mode *mode2 = NULL; 2384 uint32_t pixel_bytes1 = 0; 2385 uint32_t pixel_bytes2 = 0; 2386 2387 if (rdev->mode_info.crtcs[0]->base.enabled) { 2388 mode1 = &rdev->mode_info.crtcs[0]->base.mode; 2389 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; 2390 } 2391 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 2392 if (rdev->mode_info.crtcs[1]->base.enabled) { 2393 mode2 = &rdev->mode_info.crtcs[1]->base.mode; 2394 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; 2395 } 2396 } 2397 2398 min_mem_eff.full = rfixed_const_8(0); 2399 /* get modes */ 2400 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { 2401 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); 2402 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); 2403 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); 2404 /* check crtc enables */ 2405 if (mode2) 2406 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); 2407 if (mode1) 2408 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); 2409 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); 2410 } 2411 2412 /* 2413 * determine is there is enough bw for current mode 2414 */ 2415 mclk_ff.full = rfixed_const(rdev->clock.default_mclk); 2416 temp_ff.full = rfixed_const(100); 2417 mclk_ff.full = rfixed_div(mclk_ff, temp_ff); 2418 sclk_ff.full = rfixed_const(rdev->clock.default_sclk); 2419 sclk_ff.full = rfixed_div(sclk_ff, temp_ff); 2420 2421 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); 2422 temp_ff.full = rfixed_const(temp); 2423 mem_bw.full = rfixed_mul(mclk_ff, temp_ff); 2424 2425 pix_clk.full = 0; 2426 pix_clk2.full = 0; 2427 peak_disp_bw.full = 0; 2428 if (mode1) { 2429 temp_ff.full = rfixed_const(1000); 2430 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ 2431 pix_clk.full = rfixed_div(pix_clk, temp_ff); 2432 temp_ff.full = rfixed_const(pixel_bytes1); 2433 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); 2434 } 2435 if (mode2) { 2436 temp_ff.full = rfixed_const(1000); 2437 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ 2438 pix_clk2.full = rfixed_div(pix_clk2, temp_ff); 2439 temp_ff.full = rfixed_const(pixel_bytes2); 2440 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); 2441 } 2442 2443 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); 2444 if (peak_disp_bw.full >= mem_bw.full) { 2445 DRM_ERROR("You may not have enough display bandwidth for current mode\n" 2446 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); 2447 } 2448 2449 /* Get values from the EXT_MEM_CNTL register...converting its contents. */ 2450 temp = RREG32(RADEON_MEM_TIMING_CNTL); 2451 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ 2452 mem_trcd = ((temp >> 2) & 0x3) + 1; 2453 mem_trp = ((temp & 0x3)) + 1; 2454 mem_tras = ((temp & 0x70) >> 4) + 1; 2455 } else if (rdev->family == CHIP_R300 || 2456 rdev->family == CHIP_R350) { /* r300, r350 */ 2457 mem_trcd = (temp & 0x7) + 1; 2458 mem_trp = ((temp >> 8) & 0x7) + 1; 2459 mem_tras = ((temp >> 11) & 0xf) + 4; 2460 } else if (rdev->family == CHIP_RV350 || 2461 rdev->family <= CHIP_RV380) { 2462 /* rv3x0 */ 2463 mem_trcd = (temp & 0x7) + 3; 2464 mem_trp = ((temp >> 8) & 0x7) + 3; 2465 mem_tras = ((temp >> 11) & 0xf) + 6; 2466 } else if (rdev->family == CHIP_R420 || 2467 rdev->family == CHIP_R423 || 2468 rdev->family == CHIP_RV410) { 2469 /* r4xx */ 2470 mem_trcd = (temp & 0xf) + 3; 2471 if (mem_trcd > 15) 2472 mem_trcd = 15; 2473 mem_trp = ((temp >> 8) & 0xf) + 3; 2474 if (mem_trp > 15) 2475 mem_trp = 15; 2476 mem_tras = ((temp >> 12) & 0x1f) + 6; 2477 if (mem_tras > 31) 2478 mem_tras = 31; 2479 } else { /* RV200, R200 */ 2480 mem_trcd = (temp & 0x7) + 1; 2481 mem_trp = ((temp >> 8) & 0x7) + 1; 2482 mem_tras = ((temp >> 12) & 0xf) + 4; 2483 } 2484 /* convert to FF */ 2485 trcd_ff.full = rfixed_const(mem_trcd); 2486 trp_ff.full = rfixed_const(mem_trp); 2487 tras_ff.full = rfixed_const(mem_tras); 2488 2489 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ 2490 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); 2491 data = (temp & (7 << 20)) >> 20; 2492 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { 2493 if (rdev->family == CHIP_RS480) /* don't think rs400 */ 2494 tcas_ff = memtcas_rs480_ff[data]; 2495 else 2496 tcas_ff = memtcas_ff[data]; 2497 } else 2498 tcas_ff = memtcas2_ff[data]; 2499 2500 if (rdev->family == CHIP_RS400 || 2501 rdev->family == CHIP_RS480) { 2502 /* extra cas latency stored in bits 23-25 0-4 clocks */ 2503 data = (temp >> 23) & 0x7; 2504 if (data < 5) 2505 tcas_ff.full += rfixed_const(data); 2506 } 2507 2508 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { 2509 /* on the R300, Tcas is included in Trbs. 2510 */ 2511 temp = RREG32(RADEON_MEM_CNTL); 2512 data = (R300_MEM_NUM_CHANNELS_MASK & temp); 2513 if (data == 1) { 2514 if (R300_MEM_USE_CD_CH_ONLY & temp) { 2515 temp = RREG32(R300_MC_IND_INDEX); 2516 temp &= ~R300_MC_IND_ADDR_MASK; 2517 temp |= R300_MC_READ_CNTL_CD_mcind; 2518 WREG32(R300_MC_IND_INDEX, temp); 2519 temp = RREG32(R300_MC_IND_DATA); 2520 data = (R300_MEM_RBS_POSITION_C_MASK & temp); 2521 } else { 2522 temp = RREG32(R300_MC_READ_CNTL_AB); 2523 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 2524 } 2525 } else { 2526 temp = RREG32(R300_MC_READ_CNTL_AB); 2527 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 2528 } 2529 if (rdev->family == CHIP_RV410 || 2530 rdev->family == CHIP_R420 || 2531 rdev->family == CHIP_R423) 2532 trbs_ff = memtrbs_r4xx[data]; 2533 else 2534 trbs_ff = memtrbs[data]; 2535 tcas_ff.full += trbs_ff.full; 2536 } 2537 2538 sclk_eff_ff.full = sclk_ff.full; 2539 2540 if (rdev->flags & RADEON_IS_AGP) { 2541 fixed20_12 agpmode_ff; 2542 agpmode_ff.full = rfixed_const(radeon_agpmode); 2543 temp_ff.full = rfixed_const_666(16); 2544 sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); 2545 } 2546 /* TODO PCIE lanes may affect this - agpmode == 16?? */ 2547 2548 if (ASIC_IS_R300(rdev)) { 2549 sclk_delay_ff.full = rfixed_const(250); 2550 } else { 2551 if ((rdev->family == CHIP_RV100) || 2552 rdev->flags & RADEON_IS_IGP) { 2553 if (rdev->mc.vram_is_ddr) 2554 sclk_delay_ff.full = rfixed_const(41); 2555 else 2556 sclk_delay_ff.full = rfixed_const(33); 2557 } else { 2558 if (rdev->mc.vram_width == 128) 2559 sclk_delay_ff.full = rfixed_const(57); 2560 else 2561 sclk_delay_ff.full = rfixed_const(41); 2562 } 2563 } 2564 2565 mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); 2566 2567 if (rdev->mc.vram_is_ddr) { 2568 if (rdev->mc.vram_width == 32) { 2569 k1.full = rfixed_const(40); 2570 c = 3; 2571 } else { 2572 k1.full = rfixed_const(20); 2573 c = 1; 2574 } 2575 } else { 2576 k1.full = rfixed_const(40); 2577 c = 3; 2578 } 2579 2580 temp_ff.full = rfixed_const(2); 2581 mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); 2582 temp_ff.full = rfixed_const(c); 2583 mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); 2584 temp_ff.full = rfixed_const(4); 2585 mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); 2586 mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); 2587 mc_latency_mclk.full += k1.full; 2588 2589 mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); 2590 mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); 2591 2592 /* 2593 HW cursor time assuming worst case of full size colour cursor. 2594 */ 2595 temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); 2596 temp_ff.full += trcd_ff.full; 2597 if (temp_ff.full < tras_ff.full) 2598 temp_ff.full = tras_ff.full; 2599 cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); 2600 2601 temp_ff.full = rfixed_const(cur_size); 2602 cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); 2603 /* 2604 Find the total latency for the display data. 2605 */ 2606 disp_latency_overhead.full = rfixed_const(8); 2607 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); 2608 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; 2609 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; 2610 2611 if (mc_latency_mclk.full > mc_latency_sclk.full) 2612 disp_latency.full = mc_latency_mclk.full; 2613 else 2614 disp_latency.full = mc_latency_sclk.full; 2615 2616 /* setup Max GRPH_STOP_REQ default value */ 2617 if (ASIC_IS_RV100(rdev)) 2618 max_stop_req = 0x5c; 2619 else 2620 max_stop_req = 0x7c; 2621 2622 if (mode1) { 2623 /* CRTC1 2624 Set GRPH_BUFFER_CNTL register using h/w defined optimal values. 2625 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] 2626 */ 2627 stop_req = mode1->hdisplay * pixel_bytes1 / 16; 2628 2629 if (stop_req > max_stop_req) 2630 stop_req = max_stop_req; 2631 2632 /* 2633 Find the drain rate of the display buffer. 2634 */ 2635 temp_ff.full = rfixed_const((16/pixel_bytes1)); 2636 disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); 2637 2638 /* 2639 Find the critical point of the display buffer. 2640 */ 2641 crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); 2642 crit_point_ff.full += rfixed_const_half(0); 2643 2644 critical_point = rfixed_trunc(crit_point_ff); 2645 2646 if (rdev->disp_priority == 2) { 2647 critical_point = 0; 2648 } 2649 2650 /* 2651 The critical point should never be above max_stop_req-4. Setting 2652 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. 2653 */ 2654 if (max_stop_req - critical_point < 4) 2655 critical_point = 0; 2656 2657 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { 2658 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ 2659 critical_point = 0x10; 2660 } 2661 2662 temp = RREG32(RADEON_GRPH_BUFFER_CNTL); 2663 temp &= ~(RADEON_GRPH_STOP_REQ_MASK); 2664 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 2665 temp &= ~(RADEON_GRPH_START_REQ_MASK); 2666 if ((rdev->family == CHIP_R350) && 2667 (stop_req > 0x15)) { 2668 stop_req -= 0x10; 2669 } 2670 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 2671 temp |= RADEON_GRPH_BUFFER_SIZE; 2672 temp &= ~(RADEON_GRPH_CRITICAL_CNTL | 2673 RADEON_GRPH_CRITICAL_AT_SOF | 2674 RADEON_GRPH_STOP_CNTL); 2675 /* 2676 Write the result into the register. 2677 */ 2678 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 2679 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 2680 2681 #if 0 2682 if ((rdev->family == CHIP_RS400) || 2683 (rdev->family == CHIP_RS480)) { 2684 /* attempt to program RS400 disp regs correctly ??? */ 2685 temp = RREG32(RS400_DISP1_REG_CNTL); 2686 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | 2687 RS400_DISP1_STOP_REQ_LEVEL_MASK); 2688 WREG32(RS400_DISP1_REQ_CNTL1, (temp | 2689 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 2690 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 2691 temp = RREG32(RS400_DMIF_MEM_CNTL1); 2692 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | 2693 RS400_DISP1_CRITICAL_POINT_STOP_MASK); 2694 WREG32(RS400_DMIF_MEM_CNTL1, (temp | 2695 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | 2696 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); 2697 } 2698 #endif 2699 2700 DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n", 2701 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ 2702 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); 2703 } 2704 2705 if (mode2) { 2706 u32 grph2_cntl; 2707 stop_req = mode2->hdisplay * pixel_bytes2 / 16; 2708 2709 if (stop_req > max_stop_req) 2710 stop_req = max_stop_req; 2711 2712 /* 2713 Find the drain rate of the display buffer. 2714 */ 2715 temp_ff.full = rfixed_const((16/pixel_bytes2)); 2716 disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); 2717 2718 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); 2719 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); 2720 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 2721 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); 2722 if ((rdev->family == CHIP_R350) && 2723 (stop_req > 0x15)) { 2724 stop_req -= 0x10; 2725 } 2726 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 2727 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; 2728 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | 2729 RADEON_GRPH_CRITICAL_AT_SOF | 2730 RADEON_GRPH_STOP_CNTL); 2731 2732 if ((rdev->family == CHIP_RS100) || 2733 (rdev->family == CHIP_RS200)) 2734 critical_point2 = 0; 2735 else { 2736 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; 2737 temp_ff.full = rfixed_const(temp); 2738 temp_ff.full = rfixed_mul(mclk_ff, temp_ff); 2739 if (sclk_ff.full < temp_ff.full) 2740 temp_ff.full = sclk_ff.full; 2741 2742 read_return_rate.full = temp_ff.full; 2743 2744 if (mode1) { 2745 temp_ff.full = read_return_rate.full - disp_drain_rate.full; 2746 time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); 2747 } else { 2748 time_disp1_drop_priority.full = 0; 2749 } 2750 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; 2751 crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); 2752 crit_point_ff.full += rfixed_const_half(0); 2753 2754 critical_point2 = rfixed_trunc(crit_point_ff); 2755 2756 if (rdev->disp_priority == 2) { 2757 critical_point2 = 0; 2758 } 2759 2760 if (max_stop_req - critical_point2 < 4) 2761 critical_point2 = 0; 2762 2763 } 2764 2765 if (critical_point2 == 0 && rdev->family == CHIP_R300) { 2766 /* some R300 cards have problem with this set to 0 */ 2767 critical_point2 = 0x10; 2768 } 2769 2770 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 2771 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 2772 2773 if ((rdev->family == CHIP_RS400) || 2774 (rdev->family == CHIP_RS480)) { 2775 #if 0 2776 /* attempt to program RS400 disp2 regs correctly ??? */ 2777 temp = RREG32(RS400_DISP2_REQ_CNTL1); 2778 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | 2779 RS400_DISP2_STOP_REQ_LEVEL_MASK); 2780 WREG32(RS400_DISP2_REQ_CNTL1, (temp | 2781 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 2782 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 2783 temp = RREG32(RS400_DISP2_REQ_CNTL2); 2784 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | 2785 RS400_DISP2_CRITICAL_POINT_STOP_MASK); 2786 WREG32(RS400_DISP2_REQ_CNTL2, (temp | 2787 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | 2788 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); 2789 #endif 2790 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); 2791 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); 2792 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); 2793 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); 2794 } 2795 2796 DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n", 2797 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); 2798 } 2799 } 2800 2801 static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t) 2802 { 2803 DRM_ERROR("pitch %d\n", t->pitch); 2804 DRM_ERROR("use_pitch %d\n", t->use_pitch); 2805 DRM_ERROR("width %d\n", t->width); 2806 DRM_ERROR("width_11 %d\n", t->width_11); 2807 DRM_ERROR("height %d\n", t->height); 2808 DRM_ERROR("height_11 %d\n", t->height_11); 2809 DRM_ERROR("num levels %d\n", t->num_levels); 2810 DRM_ERROR("depth %d\n", t->txdepth); 2811 DRM_ERROR("bpp %d\n", t->cpp); 2812 DRM_ERROR("coordinate type %d\n", t->tex_coord_type); 2813 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); 2814 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); 2815 DRM_ERROR("compress format %d\n", t->compress_format); 2816 } 2817 2818 static int r100_cs_track_cube(struct radeon_device *rdev, 2819 struct r100_cs_track *track, unsigned idx) 2820 { 2821 unsigned face, w, h; 2822 struct radeon_bo *cube_robj; 2823 unsigned long size; 2824 2825 for (face = 0; face < 5; face++) { 2826 cube_robj = track->textures[idx].cube_info[face].robj; 2827 w = track->textures[idx].cube_info[face].width; 2828 h = track->textures[idx].cube_info[face].height; 2829 2830 size = w * h; 2831 size *= track->textures[idx].cpp; 2832 2833 size += track->textures[idx].cube_info[face].offset; 2834 2835 if (size > radeon_bo_size(cube_robj)) { 2836 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", 2837 size, radeon_bo_size(cube_robj)); 2838 r100_cs_track_texture_print(&track->textures[idx]); 2839 return -1; 2840 } 2841 } 2842 return 0; 2843 } 2844 2845 static int r100_track_compress_size(int compress_format, int w, int h) 2846 { 2847 int block_width, block_height, block_bytes; 2848 int wblocks, hblocks; 2849 int min_wblocks; 2850 int sz; 2851 2852 block_width = 4; 2853 block_height = 4; 2854 2855 switch (compress_format) { 2856 case R100_TRACK_COMP_DXT1: 2857 block_bytes = 8; 2858 min_wblocks = 4; 2859 break; 2860 default: 2861 case R100_TRACK_COMP_DXT35: 2862 block_bytes = 16; 2863 min_wblocks = 2; 2864 break; 2865 } 2866 2867 hblocks = (h + block_height - 1) / block_height; 2868 wblocks = (w + block_width - 1) / block_width; 2869 if (wblocks < min_wblocks) 2870 wblocks = min_wblocks; 2871 sz = wblocks * hblocks * block_bytes; 2872 return sz; 2873 } 2874 2875 static int r100_cs_track_texture_check(struct radeon_device *rdev, 2876 struct r100_cs_track *track) 2877 { 2878 struct radeon_bo *robj; 2879 unsigned long size; 2880 unsigned u, i, w, h; 2881 int ret; 2882 2883 for (u = 0; u < track->num_texture; u++) { 2884 if (!track->textures[u].enabled) 2885 continue; 2886 robj = track->textures[u].robj; 2887 if (robj == NULL) { 2888 DRM_ERROR("No texture bound to unit %u\n", u); 2889 return -EINVAL; 2890 } 2891 size = 0; 2892 for (i = 0; i <= track->textures[u].num_levels; i++) { 2893 if (track->textures[u].use_pitch) { 2894 if (rdev->family < CHIP_R300) 2895 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i); 2896 else 2897 w = track->textures[u].pitch / (1 << i); 2898 } else { 2899 w = track->textures[u].width; 2900 if (rdev->family >= CHIP_RV515) 2901 w |= track->textures[u].width_11; 2902 w = w / (1 << i); 2903 if (track->textures[u].roundup_w) 2904 w = roundup_pow_of_two(w); 2905 } 2906 h = track->textures[u].height; 2907 if (rdev->family >= CHIP_RV515) 2908 h |= track->textures[u].height_11; 2909 h = h / (1 << i); 2910 if (track->textures[u].roundup_h) 2911 h = roundup_pow_of_two(h); 2912 if (track->textures[u].compress_format) { 2913 2914 size += r100_track_compress_size(track->textures[u].compress_format, w, h); 2915 /* compressed textures are block based */ 2916 } else 2917 size += w * h; 2918 } 2919 size *= track->textures[u].cpp; 2920 2921 switch (track->textures[u].tex_coord_type) { 2922 case 0: 2923 break; 2924 case 1: 2925 size *= (1 << track->textures[u].txdepth); 2926 break; 2927 case 2: 2928 if (track->separate_cube) { 2929 ret = r100_cs_track_cube(rdev, track, u); 2930 if (ret) 2931 return ret; 2932 } else 2933 size *= 6; 2934 break; 2935 default: 2936 DRM_ERROR("Invalid texture coordinate type %u for unit " 2937 "%u\n", track->textures[u].tex_coord_type, u); 2938 return -EINVAL; 2939 } 2940 if (size > radeon_bo_size(robj)) { 2941 DRM_ERROR("Texture of unit %u needs %lu bytes but is " 2942 "%lu\n", u, size, radeon_bo_size(robj)); 2943 r100_cs_track_texture_print(&track->textures[u]); 2944 return -EINVAL; 2945 } 2946 } 2947 return 0; 2948 } 2949 2950 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) 2951 { 2952 unsigned i; 2953 unsigned long size; 2954 unsigned prim_walk; 2955 unsigned nverts; 2956 2957 for (i = 0; i < track->num_cb; i++) { 2958 if (track->cb[i].robj == NULL) { 2959 if (!(track->fastfill || track->color_channel_mask || 2960 track->blend_read_enable)) { 2961 continue; 2962 } 2963 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 2964 return -EINVAL; 2965 } 2966 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; 2967 size += track->cb[i].offset; 2968 if (size > radeon_bo_size(track->cb[i].robj)) { 2969 DRM_ERROR("[drm] Buffer too small for color buffer %d " 2970 "(need %lu have %lu) !\n", i, size, 2971 radeon_bo_size(track->cb[i].robj)); 2972 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", 2973 i, track->cb[i].pitch, track->cb[i].cpp, 2974 track->cb[i].offset, track->maxy); 2975 return -EINVAL; 2976 } 2977 } 2978 if (track->z_enabled) { 2979 if (track->zb.robj == NULL) { 2980 DRM_ERROR("[drm] No buffer for z buffer !\n"); 2981 return -EINVAL; 2982 } 2983 size = track->zb.pitch * track->zb.cpp * track->maxy; 2984 size += track->zb.offset; 2985 if (size > radeon_bo_size(track->zb.robj)) { 2986 DRM_ERROR("[drm] Buffer too small for z buffer " 2987 "(need %lu have %lu) !\n", size, 2988 radeon_bo_size(track->zb.robj)); 2989 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", 2990 track->zb.pitch, track->zb.cpp, 2991 track->zb.offset, track->maxy); 2992 return -EINVAL; 2993 } 2994 } 2995 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 2996 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; 2997 switch (prim_walk) { 2998 case 1: 2999 for (i = 0; i < track->num_arrays; i++) { 3000 size = track->arrays[i].esize * track->max_indx * 4; 3001 if (track->arrays[i].robj == NULL) { 3002 DRM_ERROR("(PW %u) Vertex array %u no buffer " 3003 "bound\n", prim_walk, i); 3004 return -EINVAL; 3005 } 3006 if (size > radeon_bo_size(track->arrays[i].robj)) { 3007 dev_err(rdev->dev, "(PW %u) Vertex array %u " 3008 "need %lu dwords have %lu dwords\n", 3009 prim_walk, i, size >> 2, 3010 radeon_bo_size(track->arrays[i].robj) 3011 >> 2); 3012 DRM_ERROR("Max indices %u\n", track->max_indx); 3013 return -EINVAL; 3014 } 3015 } 3016 break; 3017 case 2: 3018 for (i = 0; i < track->num_arrays; i++) { 3019 size = track->arrays[i].esize * (nverts - 1) * 4; 3020 if (track->arrays[i].robj == NULL) { 3021 DRM_ERROR("(PW %u) Vertex array %u no buffer " 3022 "bound\n", prim_walk, i); 3023 return -EINVAL; 3024 } 3025 if (size > radeon_bo_size(track->arrays[i].robj)) { 3026 dev_err(rdev->dev, "(PW %u) Vertex array %u " 3027 "need %lu dwords have %lu dwords\n", 3028 prim_walk, i, size >> 2, 3029 radeon_bo_size(track->arrays[i].robj) 3030 >> 2); 3031 return -EINVAL; 3032 } 3033 } 3034 break; 3035 case 3: 3036 size = track->vtx_size * nverts; 3037 if (size != track->immd_dwords) { 3038 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n", 3039 track->immd_dwords, size); 3040 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", 3041 nverts, track->vtx_size); 3042 return -EINVAL; 3043 } 3044 break; 3045 default: 3046 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", 3047 prim_walk); 3048 return -EINVAL; 3049 } 3050 return r100_cs_track_texture_check(rdev, track); 3051 } 3052 3053 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) 3054 { 3055 unsigned i, face; 3056 3057 if (rdev->family < CHIP_R300) { 3058 track->num_cb = 1; 3059 if (rdev->family <= CHIP_RS200) 3060 track->num_texture = 3; 3061 else 3062 track->num_texture = 6; 3063 track->maxy = 2048; 3064 track->separate_cube = 1; 3065 } else { 3066 track->num_cb = 4; 3067 track->num_texture = 16; 3068 track->maxy = 4096; 3069 track->separate_cube = 0; 3070 } 3071 3072 for (i = 0; i < track->num_cb; i++) { 3073 track->cb[i].robj = NULL; 3074 track->cb[i].pitch = 8192; 3075 track->cb[i].cpp = 16; 3076 track->cb[i].offset = 0; 3077 } 3078 track->z_enabled = true; 3079 track->zb.robj = NULL; 3080 track->zb.pitch = 8192; 3081 track->zb.cpp = 4; 3082 track->zb.offset = 0; 3083 track->vtx_size = 0x7F; 3084 track->immd_dwords = 0xFFFFFFFFUL; 3085 track->num_arrays = 11; 3086 track->max_indx = 0x00FFFFFFUL; 3087 for (i = 0; i < track->num_arrays; i++) { 3088 track->arrays[i].robj = NULL; 3089 track->arrays[i].esize = 0x7F; 3090 } 3091 for (i = 0; i < track->num_texture; i++) { 3092 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 3093 track->textures[i].pitch = 16536; 3094 track->textures[i].width = 16536; 3095 track->textures[i].height = 16536; 3096 track->textures[i].width_11 = 1 << 11; 3097 track->textures[i].height_11 = 1 << 11; 3098 track->textures[i].num_levels = 12; 3099 if (rdev->family <= CHIP_RS200) { 3100 track->textures[i].tex_coord_type = 0; 3101 track->textures[i].txdepth = 0; 3102 } else { 3103 track->textures[i].txdepth = 16; 3104 track->textures[i].tex_coord_type = 1; 3105 } 3106 track->textures[i].cpp = 64; 3107 track->textures[i].robj = NULL; 3108 /* CS IB emission code makes sure texture unit are disabled */ 3109 track->textures[i].enabled = false; 3110 track->textures[i].roundup_w = true; 3111 track->textures[i].roundup_h = true; 3112 if (track->separate_cube) 3113 for (face = 0; face < 5; face++) { 3114 track->textures[i].cube_info[face].robj = NULL; 3115 track->textures[i].cube_info[face].width = 16536; 3116 track->textures[i].cube_info[face].height = 16536; 3117 track->textures[i].cube_info[face].offset = 0; 3118 } 3119 } 3120 } 3121 3122 int r100_ring_test(struct radeon_device *rdev) 3123 { 3124 uint32_t scratch; 3125 uint32_t tmp = 0; 3126 unsigned i; 3127 int r; 3128 3129 r = radeon_scratch_get(rdev, &scratch); 3130 if (r) { 3131 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r); 3132 return r; 3133 } 3134 WREG32(scratch, 0xCAFEDEAD); 3135 r = radeon_ring_lock(rdev, 2); 3136 if (r) { 3137 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 3138 radeon_scratch_free(rdev, scratch); 3139 return r; 3140 } 3141 radeon_ring_write(rdev, PACKET0(scratch, 0)); 3142 radeon_ring_write(rdev, 0xDEADBEEF); 3143 radeon_ring_unlock_commit(rdev); 3144 for (i = 0; i < rdev->usec_timeout; i++) { 3145 tmp = RREG32(scratch); 3146 if (tmp == 0xDEADBEEF) { 3147 break; 3148 } 3149 DRM_UDELAY(1); 3150 } 3151 if (i < rdev->usec_timeout) { 3152 DRM_INFO("ring test succeeded in %d usecs\n", i); 3153 } else { 3154 DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n", 3155 scratch, tmp); 3156 r = -EINVAL; 3157 } 3158 radeon_scratch_free(rdev, scratch); 3159 return r; 3160 } 3161 3162 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3163 { 3164 radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1)); 3165 radeon_ring_write(rdev, ib->gpu_addr); 3166 radeon_ring_write(rdev, ib->length_dw); 3167 } 3168 3169 int r100_ib_test(struct radeon_device *rdev) 3170 { 3171 struct radeon_ib *ib; 3172 uint32_t scratch; 3173 uint32_t tmp = 0; 3174 unsigned i; 3175 int r; 3176 3177 r = radeon_scratch_get(rdev, &scratch); 3178 if (r) { 3179 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); 3180 return r; 3181 } 3182 WREG32(scratch, 0xCAFEDEAD); 3183 r = radeon_ib_get(rdev, &ib); 3184 if (r) { 3185 return r; 3186 } 3187 ib->ptr[0] = PACKET0(scratch, 0); 3188 ib->ptr[1] = 0xDEADBEEF; 3189 ib->ptr[2] = PACKET2(0); 3190 ib->ptr[3] = PACKET2(0); 3191 ib->ptr[4] = PACKET2(0); 3192 ib->ptr[5] = PACKET2(0); 3193 ib->ptr[6] = PACKET2(0); 3194 ib->ptr[7] = PACKET2(0); 3195 ib->length_dw = 8; 3196 r = radeon_ib_schedule(rdev, ib); 3197 if (r) { 3198 radeon_scratch_free(rdev, scratch); 3199 radeon_ib_free(rdev, &ib); 3200 return r; 3201 } 3202 r = radeon_fence_wait(ib->fence, false); 3203 if (r) { 3204 return r; 3205 } 3206 for (i = 0; i < rdev->usec_timeout; i++) { 3207 tmp = RREG32(scratch); 3208 if (tmp == 0xDEADBEEF) { 3209 break; 3210 } 3211 DRM_UDELAY(1); 3212 } 3213 if (i < rdev->usec_timeout) { 3214 DRM_INFO("ib test succeeded in %u usecs\n", i); 3215 } else { 3216 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", 3217 scratch, tmp); 3218 r = -EINVAL; 3219 } 3220 radeon_scratch_free(rdev, scratch); 3221 radeon_ib_free(rdev, &ib); 3222 return r; 3223 } 3224 3225 void r100_ib_fini(struct radeon_device *rdev) 3226 { 3227 radeon_ib_pool_fini(rdev); 3228 } 3229 3230 int r100_ib_init(struct radeon_device *rdev) 3231 { 3232 int r; 3233 3234 r = radeon_ib_pool_init(rdev); 3235 if (r) { 3236 dev_err(rdev->dev, "failled initializing IB pool (%d).\n", r); 3237 r100_ib_fini(rdev); 3238 return r; 3239 } 3240 r = r100_ib_test(rdev); 3241 if (r) { 3242 dev_err(rdev->dev, "failled testing IB (%d).\n", r); 3243 r100_ib_fini(rdev); 3244 return r; 3245 } 3246 return 0; 3247 } 3248 3249 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) 3250 { 3251 /* Shutdown CP we shouldn't need to do that but better be safe than 3252 * sorry 3253 */ 3254 rdev->cp.ready = false; 3255 WREG32(R_000740_CP_CSQ_CNTL, 0); 3256 3257 /* Save few CRTC registers */ 3258 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT); 3259 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); 3260 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); 3261 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); 3262 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3263 save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL); 3264 save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET); 3265 } 3266 3267 /* Disable VGA aperture access */ 3268 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT); 3269 /* Disable cursor, overlay, crtc */ 3270 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); 3271 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | 3272 S_000054_CRTC_DISPLAY_DIS(1)); 3273 WREG32(R_000050_CRTC_GEN_CNTL, 3274 (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) | 3275 S_000050_CRTC_DISP_REQ_EN_B(1)); 3276 WREG32(R_000420_OV0_SCALE_CNTL, 3277 C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL)); 3278 WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET); 3279 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3280 WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET | 3281 S_000360_CUR2_LOCK(1)); 3282 WREG32(R_0003F8_CRTC2_GEN_CNTL, 3283 (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) | 3284 S_0003F8_CRTC2_DISPLAY_DIS(1) | 3285 S_0003F8_CRTC2_DISP_REQ_EN_B(1)); 3286 WREG32(R_000360_CUR2_OFFSET, 3287 C_000360_CUR2_LOCK & save->CUR2_OFFSET); 3288 } 3289 } 3290 3291 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save) 3292 { 3293 /* Update base address for crtc */ 3294 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location); 3295 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3296 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, 3297 rdev->mc.vram_location); 3298 } 3299 /* Restore CRTC registers */ 3300 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT); 3301 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); 3302 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); 3303 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3304 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); 3305 } 3306 } 3307 3308 void r100_vga_render_disable(struct radeon_device *rdev) 3309 { 3310 u32 tmp; 3311 3312 tmp = RREG8(R_0003C2_GENMO_WT); 3313 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp); 3314 } 3315 3316 static void r100_debugfs(struct radeon_device *rdev) 3317 { 3318 int r; 3319 3320 r = r100_debugfs_mc_info_init(rdev); 3321 if (r) 3322 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n"); 3323 } 3324 3325 static void r100_mc_program(struct radeon_device *rdev) 3326 { 3327 struct r100_mc_save save; 3328 3329 /* Stops all mc clients */ 3330 r100_mc_stop(rdev, &save); 3331 if (rdev->flags & RADEON_IS_AGP) { 3332 WREG32(R_00014C_MC_AGP_LOCATION, 3333 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) | 3334 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); 3335 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); 3336 if (rdev->family > CHIP_RV200) 3337 WREG32(R_00015C_AGP_BASE_2, 3338 upper_32_bits(rdev->mc.agp_base) & 0xff); 3339 } else { 3340 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF); 3341 WREG32(R_000170_AGP_BASE, 0); 3342 if (rdev->family > CHIP_RV200) 3343 WREG32(R_00015C_AGP_BASE_2, 0); 3344 } 3345 /* Wait for mc idle */ 3346 if (r100_mc_wait_for_idle(rdev)) 3347 dev_warn(rdev->dev, "Wait for MC idle timeout.\n"); 3348 /* Program MC, should be a 32bits limited address space */ 3349 WREG32(R_000148_MC_FB_LOCATION, 3350 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | 3351 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); 3352 r100_mc_resume(rdev, &save); 3353 } 3354 3355 void r100_clock_startup(struct radeon_device *rdev) 3356 { 3357 u32 tmp; 3358 3359 if (radeon_dynclks != -1 && radeon_dynclks) 3360 radeon_legacy_set_clock_gating(rdev, 1); 3361 /* We need to force on some of the block */ 3362 tmp = RREG32_PLL(R_00000D_SCLK_CNTL); 3363 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); 3364 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280)) 3365 tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1); 3366 WREG32_PLL(R_00000D_SCLK_CNTL, tmp); 3367 } 3368 3369 static int r100_startup(struct radeon_device *rdev) 3370 { 3371 int r; 3372 3373 /* set common regs */ 3374 r100_set_common_regs(rdev); 3375 /* program mc */ 3376 r100_mc_program(rdev); 3377 /* Resume clock */ 3378 r100_clock_startup(rdev); 3379 /* Initialize GPU configuration (# pipes, ...) */ 3380 r100_gpu_init(rdev); 3381 /* Initialize GART (initialize after TTM so we can allocate 3382 * memory through TTM but finalize after TTM) */ 3383 r100_enable_bm(rdev); 3384 if (rdev->flags & RADEON_IS_PCI) { 3385 r = r100_pci_gart_enable(rdev); 3386 if (r) 3387 return r; 3388 } 3389 /* Enable IRQ */ 3390 r100_irq_set(rdev); 3391 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 3392 /* 1M ring buffer */ 3393 r = r100_cp_init(rdev, 1024 * 1024); 3394 if (r) { 3395 dev_err(rdev->dev, "failled initializing CP (%d).\n", r); 3396 return r; 3397 } 3398 r = r100_wb_init(rdev); 3399 if (r) 3400 dev_err(rdev->dev, "failled initializing WB (%d).\n", r); 3401 r = r100_ib_init(rdev); 3402 if (r) { 3403 dev_err(rdev->dev, "failled initializing IB (%d).\n", r); 3404 return r; 3405 } 3406 return 0; 3407 } 3408 3409 int r100_resume(struct radeon_device *rdev) 3410 { 3411 /* Make sur GART are not working */ 3412 if (rdev->flags & RADEON_IS_PCI) 3413 r100_pci_gart_disable(rdev); 3414 /* Resume clock before doing reset */ 3415 r100_clock_startup(rdev); 3416 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 3417 if (radeon_gpu_reset(rdev)) { 3418 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 3419 RREG32(R_000E40_RBBM_STATUS), 3420 RREG32(R_0007C0_CP_STAT)); 3421 } 3422 /* post */ 3423 radeon_combios_asic_init(rdev->ddev); 3424 /* Resume clock after posting */ 3425 r100_clock_startup(rdev); 3426 /* Initialize surface registers */ 3427 radeon_surface_init(rdev); 3428 return r100_startup(rdev); 3429 } 3430 3431 int r100_suspend(struct radeon_device *rdev) 3432 { 3433 r100_cp_disable(rdev); 3434 r100_wb_disable(rdev); 3435 r100_irq_disable(rdev); 3436 if (rdev->flags & RADEON_IS_PCI) 3437 r100_pci_gart_disable(rdev); 3438 return 0; 3439 } 3440 3441 void r100_fini(struct radeon_device *rdev) 3442 { 3443 r100_cp_fini(rdev); 3444 r100_wb_fini(rdev); 3445 r100_ib_fini(rdev); 3446 radeon_gem_fini(rdev); 3447 if (rdev->flags & RADEON_IS_PCI) 3448 r100_pci_gart_fini(rdev); 3449 radeon_agp_fini(rdev); 3450 radeon_irq_kms_fini(rdev); 3451 radeon_fence_driver_fini(rdev); 3452 radeon_bo_fini(rdev); 3453 radeon_atombios_fini(rdev); 3454 kfree(rdev->bios); 3455 rdev->bios = NULL; 3456 } 3457 3458 int r100_mc_init(struct radeon_device *rdev) 3459 { 3460 int r; 3461 u32 tmp; 3462 3463 /* Setup GPU memory space */ 3464 rdev->mc.vram_location = 0xFFFFFFFFUL; 3465 rdev->mc.gtt_location = 0xFFFFFFFFUL; 3466 if (rdev->flags & RADEON_IS_IGP) { 3467 tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); 3468 rdev->mc.vram_location = tmp << 16; 3469 } 3470 if (rdev->flags & RADEON_IS_AGP) { 3471 r = radeon_agp_init(rdev); 3472 if (r) { 3473 radeon_agp_disable(rdev); 3474 } else { 3475 rdev->mc.gtt_location = rdev->mc.agp_base; 3476 } 3477 } 3478 r = radeon_mc_setup(rdev); 3479 if (r) 3480 return r; 3481 return 0; 3482 } 3483 3484 int r100_init(struct radeon_device *rdev) 3485 { 3486 int r; 3487 3488 /* Register debugfs file specific to this group of asics */ 3489 r100_debugfs(rdev); 3490 /* Disable VGA */ 3491 r100_vga_render_disable(rdev); 3492 /* Initialize scratch registers */ 3493 radeon_scratch_init(rdev); 3494 /* Initialize surface registers */ 3495 radeon_surface_init(rdev); 3496 /* TODO: disable VGA need to use VGA request */ 3497 /* BIOS*/ 3498 if (!radeon_get_bios(rdev)) { 3499 if (ASIC_IS_AVIVO(rdev)) 3500 return -EINVAL; 3501 } 3502 if (rdev->is_atom_bios) { 3503 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); 3504 return -EINVAL; 3505 } else { 3506 r = radeon_combios_init(rdev); 3507 if (r) 3508 return r; 3509 } 3510 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 3511 if (radeon_gpu_reset(rdev)) { 3512 dev_warn(rdev->dev, 3513 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 3514 RREG32(R_000E40_RBBM_STATUS), 3515 RREG32(R_0007C0_CP_STAT)); 3516 } 3517 /* check if cards are posted or not */ 3518 if (radeon_boot_test_post_card(rdev) == false) 3519 return -EINVAL; 3520 /* Set asic errata */ 3521 r100_errata(rdev); 3522 /* Initialize clocks */ 3523 radeon_get_clock_info(rdev->ddev); 3524 /* Initialize power management */ 3525 radeon_pm_init(rdev); 3526 /* Get vram informations */ 3527 r100_vram_info(rdev); 3528 /* Initialize memory controller (also test AGP) */ 3529 r = r100_mc_init(rdev); 3530 if (r) 3531 return r; 3532 /* Fence driver */ 3533 r = radeon_fence_driver_init(rdev); 3534 if (r) 3535 return r; 3536 r = radeon_irq_kms_init(rdev); 3537 if (r) 3538 return r; 3539 /* Memory manager */ 3540 r = radeon_bo_init(rdev); 3541 if (r) 3542 return r; 3543 if (rdev->flags & RADEON_IS_PCI) { 3544 r = r100_pci_gart_init(rdev); 3545 if (r) 3546 return r; 3547 } 3548 r100_set_safe_registers(rdev); 3549 rdev->accel_working = true; 3550 r = r100_startup(rdev); 3551 if (r) { 3552 /* Somethings want wront with the accel init stop accel */ 3553 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 3554 r100_cp_fini(rdev); 3555 r100_wb_fini(rdev); 3556 r100_ib_fini(rdev); 3557 radeon_irq_kms_fini(rdev); 3558 if (rdev->flags & RADEON_IS_PCI) 3559 r100_pci_gart_fini(rdev); 3560 rdev->accel_working = false; 3561 } 3562 return 0; 3563 } 3564