/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* RS600 / Radeon X1250/X1270 integrated GPU
 *
 * This file gathers the functions specific to the RS600, the IGP of the
 * X1250/X1270 family paired with Intel CPUs (RS690/RS740 is the X1250/X1270
 * variant for AMD CPUs). The display engine is the AVIVO one, the BIOS is an
 * ATOM BIOS, and the 3D block is the same as on the R4xx family. The GART is
 * different from the RS400 one and is very close to the R600 family GART
 * (R600 likely being an evolution of the RS600 GART block).
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs600d.h"

#include "rs600_reg_safe.h"

void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);

void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
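/*
 * rs600_page_flip() programs the new scanout base for a CRTC: take the
 * graphics update lock so both surface address registers update atomically,
 * wait for the hardware to latch the new base (SURFACE_UPDATE_PENDING),
 * then release the lock so the flip completes inside vblank.
 */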
u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}

void rs600_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	} else if (voltage->type == VOLTAGE_VDDC)
		radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);

	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
		}
	} else {
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
	}
	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);

	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
		} else
			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);

	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		hdp_dyn_cntl &= ~HDP_FORCEON;
	else
		hdp_dyn_cntl |= HDP_FORCEON;
	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
#if 0
	/* mc_host_dyn seems to cause hangs from time to time */
	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
	else
		mc_host_dyn_cntl |= MC_HOST_FORCEON;
	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
#endif
	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
	else
		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}
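/*
 * rs600_pm_prepare()/rs600_pm_finish() bracket a power state change: display
 * read requests are disabled on every enabled CRTC before reclocking and
 * re-enabled once the new state has been programmed.
 */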
void rs600_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void rs600_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
			connected = true;
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

void rs600_hpd_set_polarity(struct radeon_device *rdev,
			    enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = rs600_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		else
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		else
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

void rs600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
			rdev->irq.hpd[1] = true;
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		rs600_irq_set(rdev);
}
void rs600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
			rdev->irq.hpd[1] = false;
			break;
		default:
			break;
		}
	}
}

void rs600_bm_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* disable bus mastering */
	pci_read_config_word(rdev->pdev, 0x4, (u16 *)&tmp);
	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
	mdelay(1);
}

int rs600_asic_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	/* Stop all MC clients */
	rv515_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	rs600_bm_disable(rdev);
	/* reset GA+VAP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset CP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset MC */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		rdev->gpu_lockup = true;
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	rv515_mc_resume(rdev, &save);
	return ret;
}

/*
 * GART.
 */
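/*
 * rs600_gart_tlb_flush() invalidates the L1 TLBs and the L2 page table cache
 * by pulsing the INVALIDATE_ALL_L1_TLBS and INVALIDATE_L2_CACHE bits in
 * MC_PT0_CNTL (clear, set, clear), followed by a final read back of the
 * register.
 */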
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}

int rs600_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "RS600 GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int rs600_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Enable bus master */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* FIXME: setup default page */
	WREG32_MC(R_000100_MC_PT0_CNTL,
		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));

	for (i = 0; i < 19; i++) {
		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
	}
	/* enable first context */
	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
		  S_000102_ENABLE_PAGE_TABLE(1) |
		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);

	/* setup the page table */
	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* System context maps to VRAM space */
	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);

	/* enable page tables */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
	rs600_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rs600_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* FIXME: disable out of gart access */
	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
	radeon_gart_table_vram_unpin(rdev);
}

void rs600_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
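/*
 * GART page table entries use the R600-style layout: the low bits carry the
 * flags defined below and the upper bits hold the 4 KiB-aligned system page
 * address; each GART page gets a single 64-bit entry.
 */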
#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.ptr;

	if (i < 0 || i > rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
	writeq(addr, ptr + (i * 8));
	return 0;
}

int rs600_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;
	uint32_t mode_int = 0;
	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (rdev->irq.sw_int) {
		tmp |= S_000040_SW_INT_EN(1);
	}
	if (rdev->irq.gui_idle) {
		tmp |= S_000040_GUI_IDLE(1);
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.hpd[0]) {
		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	}
	if (rdev->irq.hpd[1]) {
		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	}
	WREG32(R_000040_GEN_INT_CNTL, tmp);
	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
	return 0;
}
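/*
 * rs600_irq_ack() samples GEN_INT_STATUS and the display interrupt status,
 * acknowledges any pending vblank and hot plug detect sources, and returns
 * the masked status bits for rs600_irq_process() to handle.
 */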
static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
	uint32_t irq_mask = S_000044_SW_INT(1);
	u32 tmp;

	/* the interrupt works, but the status bit is permanently asserted */
	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
		if (!rdev->irq.gui_idle_acked)
			irq_mask |= S_000044_GUI_IDLE_STAT(1);
	}

	if (G_000044_DISPLAY_INT_STAT(irqs)) {
		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006534_D1MODE_VBLANK_STATUS,
			       S_006534_D1MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
			       S_006D34_D2MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	} else {
		rdev->irq.stat_regs.r500.disp_int = 0;
	}

	if (irqs) {
		WREG32(R_000044_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

void rs600_irq_disable(struct radeon_device *rdev)
{
	WREG32(R_000040_GEN_INT_CNTL, 0);
	WREG32(R_006540_DxMODE_INT_MASK, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	rs600_irq_ack(rdev);
}

int rs600_irq_process(struct radeon_device *rdev)
{
	u32 status, msi_rearm;
	bool queue_hotplug = false;

	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;

	status = rs600_irq_ack(rdev);
	if (!status && !rdev->irq.stat_regs.r500.disp_int) {
		return IRQ_NONE;
	}
	while (status || rdev->irq.stat_regs.r500.disp_int) {
		/* SW interrupt */
		if (G_000044_SW_INT(status)) {
			radeon_fence_process(rdev);
		}
		/* GUI idle */
		if (G_000040_GUI_IDLE(status)) {
			rdev->irq.gui_idle_acked = true;
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
		}
		/* Vertical blank interrupts */
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[0])
				radeon_crtc_handle_flip(rdev, 0);
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[1])
				radeon_crtc_handle_flip(rdev, 1);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = rs600_irq_ack(rdev);
	}
	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS600:
		case CHIP_RS690:
		case CHIP_RS740:
			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
			WREG32(RADEON_BUS_CNTL, msi_rearm);
			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
			break;
		default:
			msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
			WREG32(RADEON_MSI_REARM_EN, msi_rearm);
			WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
	else
		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
}

int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
			return 0;
		udelay(1);
	}
	return -1;
}

void rs600_gpu_init(struct radeon_device *rdev)
{
	r420_pipes_init(rdev);
	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}

void rs600_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	base = RREG32_MC(R_000004_MC_FB_LOCATION);
	base = G_000004_MC_FB_START(base) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rs600_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
	/* FIXME: implement full support */

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;

	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if (rdev->disp_priority == 2) {
		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
	}
}
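/*
 * MC registers are not directly memory mapped; they are accessed through the
 * MC_IND_INDEX/MC_IND_DATA register pair, with the WR_EN bit set in the
 * index for writes.
 */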
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1));
	return RREG32(R_000074_MC_IND_DATA);
}

void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
	WREG32(R_000074_MC_IND_DATA, v);
}

void rs600_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}

void rs600_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}

static void rs600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stop all MC clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for MC idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

	/* FIXME: What does AGP mean for this chipset? */
	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
	WREG32_MC(R_000006_AGP_BASE, 0);
	WREG32_MC(R_000007_AGP_BASE_2, 0);
	/* Program MC */
	WREG32_MC(R_000004_MC_FB_LOCATION,
		  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

	rv515_mc_resume(rdev, &save);
}

static int rs600_startup(struct radeon_device *rdev)
{
	int r;

	rs600_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs600_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs600_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	return 0;
}

int rs600_resume(struct radeon_device *rdev)
{
	/* Make sure the GART is disabled */
	rs600_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return rs600_startup(rdev);
}
int rs600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs600_gart_disable(rdev);
	return 0;
}

void rs600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	rs600_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rs600_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if the card is posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs600_mc_init(rdev);
	rs600_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs600_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);
	rdev->accel_working = true;
	r = rs600_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		rs600_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}