/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* RS600 / Radeon X1250/X1270 integrated GPU
 *
 * This file gathers the functions specific to the RS600, the IGP of
 * the X1250/X1270 family supporting Intel CPUs (while the RS690/RS740
 * is the X1250/X1270 for AMD CPUs). The display engine is the Avivo
 * one, the BIOS is an AtomBIOS, and the 3D block is the one of the
 * R4XX family. The GART is different from the RS400 one and is very
 * close to the one of the R600 family (the R600 likely being an
 * evolution of the RS600 GART block).
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs600d.h"

#include "rs600_reg_safe.h"

void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);

void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
	int i;

	if (RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset) & AVIVO_CRTC_EN) {
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (!(RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK))
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK)
				break;
			udelay(1);
		}
	}
}

void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
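/*
 * Program a page flip on the Avivo display engine: take the graphics
 * update lock so the new scanout address cannot latch mid-update, write
 * both surface address registers, wait for the hardware to raise
 * SURFACE_UPDATE_PENDING, then release the lock so the flip completes
 * inside the next vblank.
 */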

u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}

void rs600_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	} else if (voltage->type == VOLTAGE_VDDC)
		radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);

	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
		}
	} else {
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
	}
	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);

	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
		} else
			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);

	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		hdp_dyn_cntl &= ~HDP_FORCEON;
	else
		hdp_dyn_cntl |= HDP_FORCEON;
	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
#if 0
	/* mc_host_dyn seems to cause hangs from time to time */
	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
	else
		mc_host_dyn_cntl |= MC_HOST_FORCEON;
	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
#endif
	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
	else
		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->pm.set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}

void rs600_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void rs600_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
			connected = true;
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

void rs600_hpd_set_polarity(struct radeon_device *rdev,
			    enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = rs600_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		else
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		else
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

void rs600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
			break;
		default:
			break;
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

void rs600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
			break;
		default:
			break;
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

int rs600_asic_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	/* Stop all MC clients */
	rv515_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	pci_clear_master(rdev->pdev);
	mdelay(1);
	/* reset GA+VAP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset CP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset MC */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	rv515_mc_resume(rdev, &save);
	return ret;
}

/*
 * GART.
 */
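/*
 * The RS600 GART uses a single flat page table living in VRAM: one
 * 64-bit PTE per GPU page (hence table_size = num_gpu_pages * 8 in
 * rs600_gart_init() below), with R600-style flag bits in the low bits
 * of each entry. Only MC context 0 is used; the other seven contexts
 * are disabled at enable time.
 */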

void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}

int rs600_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "RS600 GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int rs600_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Enable bus master */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* FIXME: setup default page */
	WREG32_MC(R_000100_MC_PT0_CNTL,
		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));

	for (i = 0; i < 19; i++) {
		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
	}
	/* enable first context */
	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
		  S_000102_ENABLE_PAGE_TABLE(1) |
		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);

	/* setup the page table */
	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* System context maps to VRAM space */
	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);

	/* enable page tables */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
	rs600_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rs600_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* FIXME: disable out of gart access */
	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
	radeon_gart_table_vram_unpin(rdev);
}

void rs600_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
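/*
 * Worked example: mapping GART page i to a system page at bus address
 * 0x12345000 yields the 64-bit PTE 0x12345067, i.e. the page-aligned
 * address ORed with VALID | SYSTEM | SNOOPED | READABLE | WRITEABLE
 * (0x67), written as a single writeq() below.
 */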

int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.ptr;

	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
	writeq(addr, ptr + (i * 8));
	return 0;
}

int rs600_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;
	uint32_t mode_int = 0;
	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	u32 hdmi0;

	if (ASIC_IS_DCE2(rdev))
		hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
			~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
	else
		hdmi0 = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		tmp |= S_000040_SW_INT_EN(1);
	}
	if (rdev->irq.gui_idle) {
		tmp |= S_000040_GUI_IDLE(1);
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.hpd[0]) {
		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	}
	if (rdev->irq.hpd[1]) {
		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	}
	if (rdev->irq.afmt[0]) {
		hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
	}
	WREG32(R_000040_GEN_INT_CNTL, tmp);
	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
	if (ASIC_IS_DCE2(rdev))
		WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
	return 0;
}

static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
	uint32_t irq_mask = S_000044_SW_INT(1);
	u32 tmp;

	/* the interrupt works, but the status bit is permanently asserted */
	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
		if (!rdev->irq.gui_idle_acked)
			irq_mask |= S_000044_GUI_IDLE_STAT(1);
	}

	if (G_000044_DISPLAY_INT_STAT(irqs)) {
		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006534_D1MODE_VBLANK_STATUS,
			       S_006534_D1MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
			       S_006D34_D2MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	} else {
		rdev->irq.stat_regs.r500.disp_int = 0;
	}

	if (ASIC_IS_DCE2(rdev)) {
		rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
			S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
		if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
			tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
			tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
			WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
		}
	} else
		rdev->irq.stat_regs.r500.hdmi0_status = 0;

	if (irqs) {
		WREG32(R_000044_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

void rs600_irq_disable(struct radeon_device *rdev)
{
	u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
		~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);

	WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
	WREG32(R_000040_GEN_INT_CNTL, 0);
	WREG32(R_006540_DxMODE_INT_MASK, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	rs600_irq_ack(rdev);
}

int rs600_irq_process(struct radeon_device *rdev)
{
	u32 status, msi_rearm;
	bool queue_hotplug = false;
	bool queue_hdmi = false;

	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;

	status = rs600_irq_ack(rdev);
	if (!status &&
	    !rdev->irq.stat_regs.r500.disp_int &&
	    !rdev->irq.stat_regs.r500.hdmi0_status) {
		return IRQ_NONE;
	}
	while (status ||
	       rdev->irq.stat_regs.r500.disp_int ||
	       rdev->irq.stat_regs.r500.hdmi0_status) {
		/* SW interrupt */
		if (G_000044_SW_INT(status)) {
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
		}
		/* GUI idle */
		if (G_000040_GUI_IDLE(status)) {
			rdev->irq.gui_idle_acked = true;
			wake_up(&rdev->irq.idle_queue);
		}
		/* Vertical blank interrupts */
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[0]))
				radeon_crtc_handle_flip(rdev, 0);
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[1]))
				radeon_crtc_handle_flip(rdev, 1);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
			queue_hdmi = true;
			DRM_DEBUG("HDMI0\n");
		}
		status = rs600_irq_ack(rdev);
	}
	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS600:
		case CHIP_RS690:
		case CHIP_RS740:
			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
			WREG32(RADEON_BUS_CNTL, msi_rearm);
			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
			break;
		default:
			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
	else
		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
}

int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
			return 0;
		udelay(1);
	}
	return -1;
}

void rs600_gpu_init(struct radeon_device *rdev)
{
	r420_pipes_init(rdev);
	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}

void rs600_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	base = RREG32_MC(R_000004_MC_FB_LOCATION);
	base = G_000004_MC_FB_START(base) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rs600_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
	/* FIXME: implement full support */

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;

	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if (rdev->disp_priority == 2) {
		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
	}
}

uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1));
	return RREG32(R_000074_MC_IND_DATA);
}
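/*
 * MC registers are reached indirectly: the target address goes into
 * MC_IND_INDEX (with MC_IND_WR_EN set for writes) and the payload moves
 * through MC_IND_DATA, mirroring rs600_mc_rreg() above.
 */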

void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
	WREG32(R_000074_MC_IND_DATA, v);
}

void rs600_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}

void rs600_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}

static void rs600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stop all MC clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

	/* FIXME: What does AGP mean for such a chipset? */
	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
	WREG32_MC(R_000006_AGP_BASE, 0);
	WREG32_MC(R_000007_AGP_BASE_2, 0);
	/* Program MC */
	WREG32_MC(R_000004_MC_FB_LOCATION,
		  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

	rv515_mc_resume(rdev, &save);
}

static int rs600_startup(struct radeon_device *rdev)
{
	int r;

	rs600_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs600_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs600_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	return 0;
}

int rs600_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GART is disabled */
	rs600_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = rs600_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int rs600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs600_gart_disable(rdev);
	return 0;
}

void rs600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	rs600_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rs600_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs600_mc_init(rdev);
	rs600_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs600_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);

	rdev->accel_working = true;
	r = rs600_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		rs600_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}