/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);

void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
	u16 ctl, v;
	int cap, err;

	cap = pci_pcie_cap(rdev->pdev);
	if (!cap)
		return;

	err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		return;

	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;

	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
	 * to avoid hangs or performance issues
	 */
	if ((v == 0) || (v == 6) || (v == 7)) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= (2 << 12);
		pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
	}
}

void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS +
	       radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}

/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp, toffset;
	int actual_temp = 0;

	if (rdev->family == CHIP_JUNIPER) {
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;

	} else {
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}

int sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
	int actual_temp = temp - 49;

	return actual_temp * 1000;
}

void sumo_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	/* high sh/mh */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
}

void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if (voltage->type == VOLTAGE_SW) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
		}
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->vddci == 0xff01)
			return;
		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
			rdev->pm.current_vddci = voltage->vddci;
			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
		}
	}
}

void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			rdev->irq.hpd[1] = true;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			rdev->irq.hpd[2] = true;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			rdev->irq.hpd[3] = true;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			rdev->irq.hpd[4] = true;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			rdev->irq.hpd[5] = true;
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		evergreen_irq_set(rdev);
}

void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			rdev->irq.hpd[1] = false;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			rdev->irq.hpd[2] = false;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			rdev->irq.hpd[3] = false;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			rdev->irq.hpd[4] = false;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			rdev->irq.hpd[5] = false;
			break;
		default:
			break;
		}
	}
}

/* watermark setup */

static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers. The partitioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2), other crtc must be disabled
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2), other crtc must be disabled
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs. Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (other_mode)
			tmp = 0; /* 1/2 */
		else
			tmp = 2; /* whole */
	} else
		tmp = 0;

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		case 4:
		default:
			if (ASIC_IS_DCE5(rdev))
				return 4096 * 2;
			else
				return 3840 * 2;
		case 1:
		case 5:
			if (ASIC_IS_DCE5(rdev))
				return 6144 * 2;
			else
				return 5760 * 2;
		case 2:
		case 6:
			if (ASIC_IS_DCE5(rdev))
				return 8192 * 2;
			else
				return 7680 * 2;
		case 3:
		case 7:
			if (ASIC_IS_DCE5(rdev))
				return 2048 * 2;
			else
				return 1920 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	}
}

struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

static u32 evergreen_dram_bandwidth(struct evergreen_wm_params
				    *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, disp_clk);
	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average.
	 */
	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}

static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
};

static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
};

static bool
evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (evergreen_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

static void evergreen_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct evergreen_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 pipe_offset = radeon_crtc->crtc_id * 16;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time...
		 */
		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !evergreen_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);

}

void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

/*
 * GART
 */
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	if (rdev->flags & RADEON_IS_IGP) {
		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
	} else {
		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	}
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL, 0);

	evergreen_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}


void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
}

void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	save->vga_control[0] = RREG32(D1VGA_CONTROL);
	save->vga_control[1] = RREG32(D2VGA_CONTROL);
	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (rdev->num_crtc >= 4) {
		save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
		save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
		save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
		save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
	}
	if (rdev->num_crtc >= 6) {
		save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
		save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
		save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
		save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}

	/* Stop all video */
	WREG32(VGA_RENDER_CONTROL, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	}
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK +
		       EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	WREG32(D1VGA_CONTROL, 0);
	WREG32(D2VGA_CONTROL, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_D3VGA_CONTROL, 0);
		WREG32(EVERGREEN_D4VGA_CONTROL, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_D5VGA_CONTROL, 0);
		WREG32(EVERGREEN_D6VGA_CONTROL, 0);
	}
}

void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);

		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);

		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
	}

	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
	/* Unlock host access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	/* Restore video state */
	WREG32(D1VGA_CONTROL, save->vga_control[0]);
	WREG32(D2VGA_CONTROL, save->vga_control[1]);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
		WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
		WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	}
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}

void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
		       rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		       rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	if (rdev->flags & RADEON_IS_IGP) {
		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
	}
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/*
 * CP.
 */
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* set to DX10/11 mode */
	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(rdev, 1);
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}


static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

static int evergreen_cp_start(struct radeon_device *rdev)
{
	int r, i;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	radeon_ring_write(rdev, 0x0);
	radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(CP_ME_CNTL, cp_me);

	r = radeon_ring_lock(rdev, evergreen_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < evergreen_default_size; i++)
		radeon_ring_write(rdev, evergreen_default_state[i]);

	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(rdev, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(rdev, 0xc0026f00);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);

	/* Clear consts */
	radeon_ring_write(rdev, 0xc0036f00);
	radeon_ring_write(rdev, 0x00000bc4);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);

	radeon_ring_write(rdev, 0xc0026900);
	radeon_ring_write(rdev, 0x00000316);
	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(rdev, 0x00000010); /* */

	radeon_ring_unlock_commit(rdev);

	return 0;
}

int evergreen_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	rdev->cp.wptr = 0;
	WREG32(CP_RB_WPTR, rdev->cp.wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);

	evergreen_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}

/*
 * Core functions
 */
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
						  u32 num_tile_pipes,
						  u32 num_backends,
						  u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 cur_pipe;
	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
		num_tile_pipes = EVERGREEN_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > EVERGREEN_MAX_BACKENDS)
		num_backends = EVERGREEN_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		force_no_swizzle = false;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_BARTS:
	default:
		force_no_swizzle = true;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
	}

	return backend_map;
}

static void evergreen_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 grbm_gfx_index;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 sq_config;
	u32 sq_lds_resource_mgmt;
	u32 sq_gpr_resource_mgmt_1;
	u32 sq_gpr_resource_mgmt_2;
	u32 sq_gpr_resource_mgmt_3;
	u32 sq_thread_resource_mgmt;
	u32 sq_thread_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_1;
	u32 sq_stack_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_3;
	u32 vgt_cache_invalidation;
	u32 hdp_host_path_cntl, tmp;
	int i, j, num_shader_engines, ps_thread_count;

	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_JUNIPER:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_REDWOOD:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CEDAR:
	default:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_PALM:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
rdev->config.evergreen.max_tile_pipes = 2; 1760 rdev->config.evergreen.max_simds = 2; 1761 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; 1762 rdev->config.evergreen.max_gprs = 256; 1763 rdev->config.evergreen.max_threads = 192; 1764 rdev->config.evergreen.max_gs_threads = 16; 1765 rdev->config.evergreen.max_stack_entries = 256; 1766 rdev->config.evergreen.sx_num_of_sets = 4; 1767 rdev->config.evergreen.sx_max_export_size = 128; 1768 rdev->config.evergreen.sx_max_export_pos_size = 32; 1769 rdev->config.evergreen.sx_max_export_smx_size = 96; 1770 rdev->config.evergreen.max_hw_contexts = 4; 1771 rdev->config.evergreen.sq_num_cf_insts = 1; 1772 1773 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1774 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1775 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1776 break; 1777 case CHIP_SUMO: 1778 rdev->config.evergreen.num_ses = 1; 1779 rdev->config.evergreen.max_pipes = 4; 1780 rdev->config.evergreen.max_tile_pipes = 2; 1781 if (rdev->pdev->device == 0x9648) 1782 rdev->config.evergreen.max_simds = 3; 1783 else if ((rdev->pdev->device == 0x9647) || 1784 (rdev->pdev->device == 0x964a)) 1785 rdev->config.evergreen.max_simds = 4; 1786 else 1787 rdev->config.evergreen.max_simds = 5; 1788 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; 1789 rdev->config.evergreen.max_gprs = 256; 1790 rdev->config.evergreen.max_threads = 248; 1791 rdev->config.evergreen.max_gs_threads = 32; 1792 rdev->config.evergreen.max_stack_entries = 256; 1793 rdev->config.evergreen.sx_num_of_sets = 4; 1794 rdev->config.evergreen.sx_max_export_size = 256; 1795 rdev->config.evergreen.sx_max_export_pos_size = 64; 1796 rdev->config.evergreen.sx_max_export_smx_size = 192; 1797 rdev->config.evergreen.max_hw_contexts = 8; 1798 rdev->config.evergreen.sq_num_cf_insts = 2; 1799 1800 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1801 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1802 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1803 break; 1804 case CHIP_SUMO2: 1805 rdev->config.evergreen.num_ses = 1; 1806 rdev->config.evergreen.max_pipes = 4; 1807 rdev->config.evergreen.max_tile_pipes = 4; 1808 rdev->config.evergreen.max_simds = 2; 1809 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; 1810 rdev->config.evergreen.max_gprs = 256; 1811 rdev->config.evergreen.max_threads = 248; 1812 rdev->config.evergreen.max_gs_threads = 32; 1813 rdev->config.evergreen.max_stack_entries = 512; 1814 rdev->config.evergreen.sx_num_of_sets = 4; 1815 rdev->config.evergreen.sx_max_export_size = 256; 1816 rdev->config.evergreen.sx_max_export_pos_size = 64; 1817 rdev->config.evergreen.sx_max_export_smx_size = 192; 1818 rdev->config.evergreen.max_hw_contexts = 8; 1819 rdev->config.evergreen.sq_num_cf_insts = 2; 1820 1821 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1822 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1823 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1824 break; 1825 case CHIP_BARTS: 1826 rdev->config.evergreen.num_ses = 2; 1827 rdev->config.evergreen.max_pipes = 4; 1828 rdev->config.evergreen.max_tile_pipes = 8; 1829 rdev->config.evergreen.max_simds = 7; 1830 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; 1831 rdev->config.evergreen.max_gprs = 256; 1832 rdev->config.evergreen.max_threads = 248; 1833 rdev->config.evergreen.max_gs_threads = 32; 1834 rdev->config.evergreen.max_stack_entries = 512; 1835 rdev->config.evergreen.sx_num_of_sets = 4; 1836 
rdev->config.evergreen.sx_max_export_size = 256; 1837 rdev->config.evergreen.sx_max_export_pos_size = 64; 1838 rdev->config.evergreen.sx_max_export_smx_size = 192; 1839 rdev->config.evergreen.max_hw_contexts = 8; 1840 rdev->config.evergreen.sq_num_cf_insts = 2; 1841 1842 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1843 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1844 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1845 break; 1846 case CHIP_TURKS: 1847 rdev->config.evergreen.num_ses = 1; 1848 rdev->config.evergreen.max_pipes = 4; 1849 rdev->config.evergreen.max_tile_pipes = 4; 1850 rdev->config.evergreen.max_simds = 6; 1851 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; 1852 rdev->config.evergreen.max_gprs = 256; 1853 rdev->config.evergreen.max_threads = 248; 1854 rdev->config.evergreen.max_gs_threads = 32; 1855 rdev->config.evergreen.max_stack_entries = 256; 1856 rdev->config.evergreen.sx_num_of_sets = 4; 1857 rdev->config.evergreen.sx_max_export_size = 256; 1858 rdev->config.evergreen.sx_max_export_pos_size = 64; 1859 rdev->config.evergreen.sx_max_export_smx_size = 192; 1860 rdev->config.evergreen.max_hw_contexts = 8; 1861 rdev->config.evergreen.sq_num_cf_insts = 2; 1862 1863 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1864 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1865 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1866 break; 1867 case CHIP_CAICOS: 1868 rdev->config.evergreen.num_ses = 1; 1869 rdev->config.evergreen.max_pipes = 4; 1870 rdev->config.evergreen.max_tile_pipes = 2; 1871 rdev->config.evergreen.max_simds = 2; 1872 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; 1873 rdev->config.evergreen.max_gprs = 256; 1874 rdev->config.evergreen.max_threads = 192; 1875 rdev->config.evergreen.max_gs_threads = 16; 1876 rdev->config.evergreen.max_stack_entries = 256; 1877 rdev->config.evergreen.sx_num_of_sets = 4; 1878 rdev->config.evergreen.sx_max_export_size = 128; 1879 rdev->config.evergreen.sx_max_export_pos_size = 32; 1880 rdev->config.evergreen.sx_max_export_smx_size = 96; 1881 rdev->config.evergreen.max_hw_contexts = 4; 1882 rdev->config.evergreen.sq_num_cf_insts = 1; 1883 1884 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1885 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1886 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1887 break; 1888 } 1889 1890 /* Initialize HDP */ 1891 for (i = 0, j = 0; i < 32; i++, j += 0x18) { 1892 WREG32((0x2c14 + j), 0x00000000); 1893 WREG32((0x2c18 + j), 0x00000000); 1894 WREG32((0x2c1c + j), 0x00000000); 1895 WREG32((0x2c20 + j), 0x00000000); 1896 WREG32((0x2c24 + j), 0x00000000); 1897 } 1898 1899 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 1900 1901 evergreen_fix_pci_max_read_req_size(rdev); 1902 1903 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; 1904 1905 cc_gc_shader_pipe_config |= 1906 INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes) 1907 & EVERGREEN_MAX_PIPES_MASK); 1908 cc_gc_shader_pipe_config |= 1909 INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds) 1910 & EVERGREEN_MAX_SIMDS_MASK); 1911 1912 cc_rb_backend_disable = 1913 BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends) 1914 & EVERGREEN_MAX_BACKENDS_MASK); 1915 1916 1917 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 1918 if (rdev->flags & RADEON_IS_IGP) 1919 mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG); 1920 else 1921 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 1922 1923 
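	/* Note: the NUM_PIPES field of GB_ADDR_CONFIG encodes log2 of the pipe
	 * count, so the switch below maps 1/2/4/8 tile pipes to field values
	 * 0/1/2/3 (e.g. Cypress, with 8 tile pipes, ends up with NUM_PIPES(3)).
	 */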
switch (rdev->config.evergreen.max_tile_pipes) { 1924 case 1: 1925 default: 1926 gb_addr_config |= NUM_PIPES(0); 1927 break; 1928 case 2: 1929 gb_addr_config |= NUM_PIPES(1); 1930 break; 1931 case 4: 1932 gb_addr_config |= NUM_PIPES(2); 1933 break; 1934 case 8: 1935 gb_addr_config |= NUM_PIPES(3); 1936 break; 1937 } 1938 1939 gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); 1940 gb_addr_config |= BANK_INTERLEAVE_SIZE(0); 1941 gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1); 1942 gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1); 1943 gb_addr_config |= NUM_GPUS(0); /* Hemlock? */ 1944 gb_addr_config |= MULTI_GPU_TILE_SIZE(2); 1945 1946 if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2) 1947 gb_addr_config |= ROW_SIZE(2); 1948 else 1949 gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT); 1950 1951 if (rdev->ddev->pdev->device == 0x689e) { 1952 u32 efuse_straps_4; 1953 u32 efuse_straps_3; 1954 u8 efuse_box_bit_131_124; 1955 1956 WREG32(RCU_IND_INDEX, 0x204); 1957 efuse_straps_4 = RREG32(RCU_IND_DATA); 1958 WREG32(RCU_IND_INDEX, 0x203); 1959 efuse_straps_3 = RREG32(RCU_IND_DATA); 1960 efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28)); 1961 1962 switch(efuse_box_bit_131_124) { 1963 case 0x00: 1964 gb_backend_map = 0x76543210; 1965 break; 1966 case 0x55: 1967 gb_backend_map = 0x77553311; 1968 break; 1969 case 0x56: 1970 gb_backend_map = 0x77553300; 1971 break; 1972 case 0x59: 1973 gb_backend_map = 0x77552211; 1974 break; 1975 case 0x66: 1976 gb_backend_map = 0x77443300; 1977 break; 1978 case 0x99: 1979 gb_backend_map = 0x66552211; 1980 break; 1981 case 0x5a: 1982 gb_backend_map = 0x77552200; 1983 break; 1984 case 0xaa: 1985 gb_backend_map = 0x66442200; 1986 break; 1987 case 0x95: 1988 gb_backend_map = 0x66553311; 1989 break; 1990 default: 1991 DRM_ERROR("bad backend map, using default\n"); 1992 gb_backend_map = 1993 evergreen_get_tile_pipe_to_backend_map(rdev, 1994 rdev->config.evergreen.max_tile_pipes, 1995 rdev->config.evergreen.max_backends, 1996 ((EVERGREEN_MAX_BACKENDS_MASK << 1997 rdev->config.evergreen.max_backends) & 1998 EVERGREEN_MAX_BACKENDS_MASK)); 1999 break; 2000 } 2001 } else if (rdev->ddev->pdev->device == 0x68b9) { 2002 u32 efuse_straps_3; 2003 u8 efuse_box_bit_127_124; 2004 2005 WREG32(RCU_IND_INDEX, 0x203); 2006 efuse_straps_3 = RREG32(RCU_IND_DATA); 2007 efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28); 2008 2009 switch(efuse_box_bit_127_124) { 2010 case 0x0: 2011 gb_backend_map = 0x00003210; 2012 break; 2013 case 0x5: 2014 case 0x6: 2015 case 0x9: 2016 case 0xa: 2017 gb_backend_map = 0x00003311; 2018 break; 2019 default: 2020 DRM_ERROR("bad backend map, using default\n"); 2021 gb_backend_map = 2022 evergreen_get_tile_pipe_to_backend_map(rdev, 2023 rdev->config.evergreen.max_tile_pipes, 2024 rdev->config.evergreen.max_backends, 2025 ((EVERGREEN_MAX_BACKENDS_MASK << 2026 rdev->config.evergreen.max_backends) & 2027 EVERGREEN_MAX_BACKENDS_MASK)); 2028 break; 2029 } 2030 } else { 2031 switch (rdev->family) { 2032 case CHIP_CYPRESS: 2033 case CHIP_HEMLOCK: 2034 case CHIP_BARTS: 2035 gb_backend_map = 0x66442200; 2036 break; 2037 case CHIP_JUNIPER: 2038 gb_backend_map = 0x00002200; 2039 break; 2040 default: 2041 gb_backend_map = 2042 evergreen_get_tile_pipe_to_backend_map(rdev, 2043 rdev->config.evergreen.max_tile_pipes, 2044 rdev->config.evergreen.max_backends, 2045 ((EVERGREEN_MAX_BACKENDS_MASK << 2046 
rdev->config.evergreen.max_backends) & 2047 EVERGREEN_MAX_BACKENDS_MASK)); 2048 } 2049 } 2050 2051 /* setup tiling info dword. gb_addr_config is not adequate since it does 2052 * not have bank info, so create a custom tiling dword. 2053 * bits 3:0 num_pipes 2054 * bits 7:4 num_banks 2055 * bits 11:8 group_size 2056 * bits 15:12 row_size 2057 */ 2058 rdev->config.evergreen.tile_config = 0; 2059 switch (rdev->config.evergreen.max_tile_pipes) { 2060 case 1: 2061 default: 2062 rdev->config.evergreen.tile_config |= (0 << 0); 2063 break; 2064 case 2: 2065 rdev->config.evergreen.tile_config |= (1 << 0); 2066 break; 2067 case 4: 2068 rdev->config.evergreen.tile_config |= (2 << 0); 2069 break; 2070 case 8: 2071 rdev->config.evergreen.tile_config |= (3 << 0); 2072 break; 2073 } 2074 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ 2075 if (rdev->flags & RADEON_IS_IGP) 2076 rdev->config.evergreen.tile_config |= 1 << 4; 2077 else 2078 rdev->config.evergreen.tile_config |= 2079 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 2080 rdev->config.evergreen.tile_config |= 2081 ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8; 2082 rdev->config.evergreen.tile_config |= 2083 ((gb_addr_config & 0x30000000) >> 28) << 12; 2084 2085 rdev->config.evergreen.backend_map = gb_backend_map; 2086 WREG32(GB_BACKEND_MAP, gb_backend_map); 2087 WREG32(GB_ADDR_CONFIG, gb_addr_config); 2088 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 2089 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 2090 2091 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; 2092 grbm_gfx_index = INSTANCE_BROADCAST_WRITES; 2093 2094 for (i = 0; i < rdev->config.evergreen.num_ses; i++) { 2095 u32 rb = cc_rb_backend_disable | (0xf0 << 16); 2096 u32 sp = cc_gc_shader_pipe_config; 2097 u32 gfx = grbm_gfx_index | SE_INDEX(i); 2098 2099 if (i == num_shader_engines) { 2100 rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK); 2101 sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK); 2102 } 2103 2104 WREG32(GRBM_GFX_INDEX, gfx); 2105 WREG32(RLC_GFX_INDEX, gfx); 2106 2107 WREG32(CC_RB_BACKEND_DISABLE, rb); 2108 WREG32(CC_SYS_RB_BACKEND_DISABLE, rb); 2109 WREG32(GC_USER_RB_BACKEND_DISABLE, rb); 2110 WREG32(CC_GC_SHADER_PIPE_CONFIG, sp); 2111 } 2112 2113 grbm_gfx_index |= SE_BROADCAST_WRITES; 2114 WREG32(GRBM_GFX_INDEX, grbm_gfx_index); 2115 WREG32(RLC_GFX_INDEX, grbm_gfx_index); 2116 2117 WREG32(CGTS_SYS_TCC_DISABLE, 0); 2118 WREG32(CGTS_TCC_DISABLE, 0); 2119 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); 2120 WREG32(CGTS_USER_TCC_DISABLE, 0); 2121 2122 /* set HW defaults for 3D engine */ 2123 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | 2124 ROQ_IB2_START(0x2b))); 2125 2126 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); 2127 2128 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | 2129 SYNC_GRADIENT | 2130 SYNC_WALKER | 2131 SYNC_ALIGNER)); 2132 2133 sx_debug_1 = RREG32(SX_DEBUG_1); 2134 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; 2135 WREG32(SX_DEBUG_1, sx_debug_1); 2136 2137 2138 smx_dc_ctl0 = RREG32(SMX_DC_CTL0); 2139 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); 2140 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); 2141 WREG32(SMX_DC_CTL0, smx_dc_ctl0); 2142 2143 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | 2144 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | 2145 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); 2146 2147 WREG32(PA_SC_FIFO_SIZE, 
(SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | 2148 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | 2149 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); 2150 2151 WREG32(VGT_NUM_INSTANCES, 1); 2152 WREG32(SPI_CONFIG_CNTL, 0); 2153 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); 2154 WREG32(CP_PERFMON_CNTL, 0); 2155 2156 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | 2157 FETCH_FIFO_HIWATER(0x4) | 2158 DONE_FIFO_HIWATER(0xe0) | 2159 ALU_UPDATE_FIFO_HIWATER(0x8))); 2160 2161 sq_config = RREG32(SQ_CONFIG); 2162 sq_config &= ~(PS_PRIO(3) | 2163 VS_PRIO(3) | 2164 GS_PRIO(3) | 2165 ES_PRIO(3)); 2166 sq_config |= (VC_ENABLE | 2167 EXPORT_SRC_C | 2168 PS_PRIO(0) | 2169 VS_PRIO(1) | 2170 GS_PRIO(2) | 2171 ES_PRIO(3)); 2172 2173 switch (rdev->family) { 2174 case CHIP_CEDAR: 2175 case CHIP_PALM: 2176 case CHIP_SUMO: 2177 case CHIP_SUMO2: 2178 case CHIP_CAICOS: 2179 /* no vertex cache */ 2180 sq_config &= ~VC_ENABLE; 2181 break; 2182 default: 2183 break; 2184 } 2185 2186 sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); 2187 2188 sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32); 2189 sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32); 2190 sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4); 2191 sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); 2192 sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); 2193 sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); 2194 sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); 2195 2196 switch (rdev->family) { 2197 case CHIP_CEDAR: 2198 case CHIP_PALM: 2199 case CHIP_SUMO: 2200 case CHIP_SUMO2: 2201 ps_thread_count = 96; 2202 break; 2203 default: 2204 ps_thread_count = 128; 2205 break; 2206 } 2207 2208 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); 2209 sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 2210 sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 2211 sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 2212 sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 2213 sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 2214 2215 sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 2216 sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 2217 sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 2218 sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 2219 sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 2220 sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 2221 2222 WREG32(SQ_CONFIG, sq_config); 2223 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); 2224 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); 2225 WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3); 2226 
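	/* Worked example of the GPR split programmed above, assuming
	 * max_gprs = 256 (true for every chip in the table above):
	 * NUM_CLAUSE_TEMP_GPRS(4) reserves 4 * 2 = 8 GPRs, and the remaining
	 * 248 are divided 12:6:4:4:3:3 out of 32 between PS/VS/GS/ES/HS/LS,
	 * i.e. 93 PS, 46 VS, 31 GS, 31 ES, 23 HS and 23 LS GPRs.
	 */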
WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); 2227 WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2); 2228 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); 2229 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); 2230 WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3); 2231 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0); 2232 WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt); 2233 2234 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | 2235 FORCE_EOV_MAX_REZ_CNT(255))); 2236 2237 switch (rdev->family) { 2238 case CHIP_CEDAR: 2239 case CHIP_PALM: 2240 case CHIP_SUMO: 2241 case CHIP_SUMO2: 2242 case CHIP_CAICOS: 2243 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); 2244 break; 2245 default: 2246 vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); 2247 break; 2248 } 2249 vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); 2250 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); 2251 2252 WREG32(VGT_GS_VERTEX_REUSE, 16); 2253 WREG32(PA_SU_LINE_STIPPLE_VALUE, 0); 2254 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 2255 2256 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); 2257 WREG32(VGT_OUT_DEALLOC_CNTL, 16); 2258 2259 WREG32(CB_PERF_CTR0_SEL_0, 0); 2260 WREG32(CB_PERF_CTR0_SEL_1, 0); 2261 WREG32(CB_PERF_CTR1_SEL_0, 0); 2262 WREG32(CB_PERF_CTR1_SEL_1, 0); 2263 WREG32(CB_PERF_CTR2_SEL_0, 0); 2264 WREG32(CB_PERF_CTR2_SEL_1, 0); 2265 WREG32(CB_PERF_CTR3_SEL_0, 0); 2266 WREG32(CB_PERF_CTR3_SEL_1, 0); 2267 2268 /* clear render buffer base addresses */ 2269 WREG32(CB_COLOR0_BASE, 0); 2270 WREG32(CB_COLOR1_BASE, 0); 2271 WREG32(CB_COLOR2_BASE, 0); 2272 WREG32(CB_COLOR3_BASE, 0); 2273 WREG32(CB_COLOR4_BASE, 0); 2274 WREG32(CB_COLOR5_BASE, 0); 2275 WREG32(CB_COLOR6_BASE, 0); 2276 WREG32(CB_COLOR7_BASE, 0); 2277 WREG32(CB_COLOR8_BASE, 0); 2278 WREG32(CB_COLOR9_BASE, 0); 2279 WREG32(CB_COLOR10_BASE, 0); 2280 WREG32(CB_COLOR11_BASE, 0); 2281 2282 /* set the shader const cache sizes to 0 */ 2283 for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4) 2284 WREG32(i, 0); 2285 for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) 2286 WREG32(i, 0); 2287 2288 tmp = RREG32(HDP_MISC_CNTL); 2289 tmp |= HDP_FLUSH_INVALIDATE_CACHE; 2290 WREG32(HDP_MISC_CNTL, tmp); 2291 2292 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); 2293 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); 2294 2295 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); 2296 2297 udelay(50); 2298 2299 } 2300 2301 int evergreen_mc_init(struct radeon_device *rdev) 2302 { 2303 u32 tmp; 2304 int chansize, numchan; 2305 2306 /* Get VRAM informations */ 2307 rdev->mc.vram_is_ddr = true; 2308 if (rdev->flags & RADEON_IS_IGP) 2309 tmp = RREG32(FUS_MC_ARB_RAMCFG); 2310 else 2311 tmp = RREG32(MC_ARB_RAMCFG); 2312 if (tmp & CHANSIZE_OVERRIDE) { 2313 chansize = 16; 2314 } else if (tmp & CHANSIZE_MASK) { 2315 chansize = 64; 2316 } else { 2317 chansize = 32; 2318 } 2319 tmp = RREG32(MC_SHARED_CHMAP); 2320 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { 2321 case 0: 2322 default: 2323 numchan = 1; 2324 break; 2325 case 1: 2326 numchan = 2; 2327 break; 2328 case 2: 2329 numchan = 4; 2330 break; 2331 case 3: 2332 numchan = 8; 2333 break; 2334 } 2335 rdev->mc.vram_width = numchan * chansize; 2336 /* Could aper size report 0 ? 
*/ 2337 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 2338 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 2339 /* Setup GPU memory space */ 2340 if (rdev->flags & RADEON_IS_IGP) { 2341 /* size in bytes on fusion */ 2342 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 2343 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 2344 } else { 2345 /* size in MB on evergreen */ 2346 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 2347 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 2348 } 2349 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2350 r700_vram_gtt_location(rdev, &rdev->mc); 2351 radeon_update_bandwidth_info(rdev); 2352 2353 return 0; 2354 } 2355 2356 bool evergreen_gpu_is_lockup(struct radeon_device *rdev) 2357 { 2358 u32 srbm_status; 2359 u32 grbm_status; 2360 u32 grbm_status_se0, grbm_status_se1; 2361 struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup; 2362 int r; 2363 2364 srbm_status = RREG32(SRBM_STATUS); 2365 grbm_status = RREG32(GRBM_STATUS); 2366 grbm_status_se0 = RREG32(GRBM_STATUS_SE0); 2367 grbm_status_se1 = RREG32(GRBM_STATUS_SE1); 2368 if (!(grbm_status & GUI_ACTIVE)) { 2369 r100_gpu_lockup_update(lockup, &rdev->cp); 2370 return false; 2371 } 2372 /* force CP activities */ 2373 r = radeon_ring_lock(rdev, 2); 2374 if (!r) { 2375 /* PACKET2 NOP */ 2376 radeon_ring_write(rdev, 0x80000000); 2377 radeon_ring_write(rdev, 0x80000000); 2378 radeon_ring_unlock_commit(rdev); 2379 } 2380 rdev->cp.rptr = RREG32(CP_RB_RPTR); 2381 return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp); 2382 } 2383 2384 static int evergreen_gpu_soft_reset(struct radeon_device *rdev) 2385 { 2386 struct evergreen_mc_save save; 2387 u32 grbm_reset = 0; 2388 2389 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 2390 return 0; 2391 2392 dev_info(rdev->dev, "GPU softreset \n"); 2393 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 2394 RREG32(GRBM_STATUS)); 2395 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", 2396 RREG32(GRBM_STATUS_SE0)); 2397 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", 2398 RREG32(GRBM_STATUS_SE1)); 2399 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 2400 RREG32(SRBM_STATUS)); 2401 evergreen_mc_stop(rdev, &save); 2402 if (evergreen_mc_wait_for_idle(rdev)) { 2403 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 2404 } 2405 /* Disable CP parsing/prefetching */ 2406 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); 2407 2408 /* reset all the gfx blocks */ 2409 grbm_reset = (SOFT_RESET_CP | 2410 SOFT_RESET_CB | 2411 SOFT_RESET_DB | 2412 SOFT_RESET_PA | 2413 SOFT_RESET_SC | 2414 SOFT_RESET_SPI | 2415 SOFT_RESET_SH | 2416 SOFT_RESET_SX | 2417 SOFT_RESET_TC | 2418 SOFT_RESET_TA | 2419 SOFT_RESET_VC | 2420 SOFT_RESET_VGT); 2421 2422 dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset); 2423 WREG32(GRBM_SOFT_RESET, grbm_reset); 2424 (void)RREG32(GRBM_SOFT_RESET); 2425 udelay(50); 2426 WREG32(GRBM_SOFT_RESET, 0); 2427 (void)RREG32(GRBM_SOFT_RESET); 2428 /* Wait a little for things to settle down */ 2429 udelay(50); 2430 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 2431 RREG32(GRBM_STATUS)); 2432 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", 2433 RREG32(GRBM_STATUS_SE0)); 2434 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", 2435 RREG32(GRBM_STATUS_SE1)); 2436 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 2437 RREG32(SRBM_STATUS)); 2438 evergreen_mc_resume(rdev, &save); 2439 return 0; 2440 } 2441 2442 int evergreen_asic_reset(struct radeon_device *rdev) 2443 { 2444 return evergreen_gpu_soft_reset(rdev); 2445 } 2446 2447 /* Interrupts */ 2448 
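/* Each display controller has its own register block at a fixed offset
 * (EVERGREEN_CRTCn_REGISTER_OFFSET), so the per-crtc helpers below simply add
 * that offset to a base register such as CRTC_STATUS_FRAME_COUNT or INT_MASK.
 */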
2449 u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc) 2450 { 2451 switch (crtc) { 2452 case 0: 2453 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET); 2454 case 1: 2455 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET); 2456 case 2: 2457 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET); 2458 case 3: 2459 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET); 2460 case 4: 2461 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET); 2462 case 5: 2463 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET); 2464 default: 2465 return 0; 2466 } 2467 } 2468 2469 void evergreen_disable_interrupt_state(struct radeon_device *rdev) 2470 { 2471 u32 tmp; 2472 2473 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 2474 WREG32(GRBM_INT_CNTL, 0); 2475 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 2476 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 2477 if (rdev->num_crtc >= 4) { 2478 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 2479 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 2480 } 2481 if (rdev->num_crtc >= 6) { 2482 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 2483 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 2484 } 2485 2486 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 2487 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 2488 if (rdev->num_crtc >= 4) { 2489 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 2490 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 2491 } 2492 if (rdev->num_crtc >= 6) { 2493 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 2494 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 2495 } 2496 2497 WREG32(DACA_AUTODETECT_INT_CONTROL, 0); 2498 WREG32(DACB_AUTODETECT_INT_CONTROL, 0); 2499 2500 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2501 WREG32(DC_HPD1_INT_CONTROL, tmp); 2502 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2503 WREG32(DC_HPD2_INT_CONTROL, tmp); 2504 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2505 WREG32(DC_HPD3_INT_CONTROL, tmp); 2506 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2507 WREG32(DC_HPD4_INT_CONTROL, tmp); 2508 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2509 WREG32(DC_HPD5_INT_CONTROL, tmp); 2510 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2511 WREG32(DC_HPD6_INT_CONTROL, tmp); 2512 2513 } 2514 2515 int evergreen_irq_set(struct radeon_device *rdev) 2516 { 2517 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 2518 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 2519 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; 2520 u32 grbm_int_cntl = 0; 2521 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; 2522 2523 if (!rdev->irq.installed) { 2524 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 2525 return -EINVAL; 2526 } 2527 /* don't enable anything if the ih is disabled */ 2528 if (!rdev->ih.enabled) { 2529 r600_disable_interrupts(rdev); 2530 /* force the active interrupt state to all disabled */ 2531 evergreen_disable_interrupt_state(rdev); 2532 return 0; 2533 } 2534 2535 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 2536 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 2537 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; 2538 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; 2539 hpd5 
= RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 2540 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 2541 2542 if (rdev->irq.sw_int) { 2543 DRM_DEBUG("evergreen_irq_set: sw int\n"); 2544 cp_int_cntl |= RB_INT_ENABLE; 2545 cp_int_cntl |= TIME_STAMP_INT_ENABLE; 2546 } 2547 if (rdev->irq.crtc_vblank_int[0] || 2548 rdev->irq.pflip[0]) { 2549 DRM_DEBUG("evergreen_irq_set: vblank 0\n"); 2550 crtc1 |= VBLANK_INT_MASK; 2551 } 2552 if (rdev->irq.crtc_vblank_int[1] || 2553 rdev->irq.pflip[1]) { 2554 DRM_DEBUG("evergreen_irq_set: vblank 1\n"); 2555 crtc2 |= VBLANK_INT_MASK; 2556 } 2557 if (rdev->irq.crtc_vblank_int[2] || 2558 rdev->irq.pflip[2]) { 2559 DRM_DEBUG("evergreen_irq_set: vblank 2\n"); 2560 crtc3 |= VBLANK_INT_MASK; 2561 } 2562 if (rdev->irq.crtc_vblank_int[3] || 2563 rdev->irq.pflip[3]) { 2564 DRM_DEBUG("evergreen_irq_set: vblank 3\n"); 2565 crtc4 |= VBLANK_INT_MASK; 2566 } 2567 if (rdev->irq.crtc_vblank_int[4] || 2568 rdev->irq.pflip[4]) { 2569 DRM_DEBUG("evergreen_irq_set: vblank 4\n"); 2570 crtc5 |= VBLANK_INT_MASK; 2571 } 2572 if (rdev->irq.crtc_vblank_int[5] || 2573 rdev->irq.pflip[5]) { 2574 DRM_DEBUG("evergreen_irq_set: vblank 5\n"); 2575 crtc6 |= VBLANK_INT_MASK; 2576 } 2577 if (rdev->irq.hpd[0]) { 2578 DRM_DEBUG("evergreen_irq_set: hpd 1\n"); 2579 hpd1 |= DC_HPDx_INT_EN; 2580 } 2581 if (rdev->irq.hpd[1]) { 2582 DRM_DEBUG("evergreen_irq_set: hpd 2\n"); 2583 hpd2 |= DC_HPDx_INT_EN; 2584 } 2585 if (rdev->irq.hpd[2]) { 2586 DRM_DEBUG("evergreen_irq_set: hpd 3\n"); 2587 hpd3 |= DC_HPDx_INT_EN; 2588 } 2589 if (rdev->irq.hpd[3]) { 2590 DRM_DEBUG("evergreen_irq_set: hpd 4\n"); 2591 hpd4 |= DC_HPDx_INT_EN; 2592 } 2593 if (rdev->irq.hpd[4]) { 2594 DRM_DEBUG("evergreen_irq_set: hpd 5\n"); 2595 hpd5 |= DC_HPDx_INT_EN; 2596 } 2597 if (rdev->irq.hpd[5]) { 2598 DRM_DEBUG("evergreen_irq_set: hpd 6\n"); 2599 hpd6 |= DC_HPDx_INT_EN; 2600 } 2601 if (rdev->irq.gui_idle) { 2602 DRM_DEBUG("gui idle\n"); 2603 grbm_int_cntl |= GUI_IDLE_INT_ENABLE; 2604 } 2605 2606 WREG32(CP_INT_CNTL, cp_int_cntl); 2607 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 2608 2609 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); 2610 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); 2611 if (rdev->num_crtc >= 4) { 2612 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); 2613 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); 2614 } 2615 if (rdev->num_crtc >= 6) { 2616 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); 2617 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); 2618 } 2619 2620 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); 2621 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); 2622 if (rdev->num_crtc >= 4) { 2623 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); 2624 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); 2625 } 2626 if (rdev->num_crtc >= 6) { 2627 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); 2628 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); 2629 } 2630 2631 WREG32(DC_HPD1_INT_CONTROL, hpd1); 2632 WREG32(DC_HPD2_INT_CONTROL, hpd2); 2633 WREG32(DC_HPD3_INT_CONTROL, hpd3); 2634 WREG32(DC_HPD4_INT_CONTROL, hpd4); 2635 WREG32(DC_HPD5_INT_CONTROL, hpd5); 2636 WREG32(DC_HPD6_INT_CONTROL, hpd6); 2637 2638 return 0; 2639 } 2640 2641 static void evergreen_irq_ack(struct radeon_device *rdev) 2642 { 2643 u32 tmp; 2644 2645 rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS); 2646 rdev->irq.stat_regs.evergreen.disp_int_cont = 
RREG32(DISP_INTERRUPT_STATUS_CONTINUE); 2647 rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); 2648 rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); 2649 rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); 2650 rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); 2651 rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET); 2652 rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET); 2653 if (rdev->num_crtc >= 4) { 2654 rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET); 2655 rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET); 2656 } 2657 if (rdev->num_crtc >= 6) { 2658 rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET); 2659 rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET); 2660 } 2661 2662 if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED) 2663 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 2664 if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED) 2665 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 2666 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) 2667 WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); 2668 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) 2669 WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); 2670 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) 2671 WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); 2672 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) 2673 WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); 2674 2675 if (rdev->num_crtc >= 4) { 2676 if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED) 2677 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 2678 if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED) 2679 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 2680 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) 2681 WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); 2682 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) 2683 WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); 2684 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) 2685 WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); 2686 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) 2687 WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); 2688 } 2689 2690 if (rdev->num_crtc >= 6) { 2691 if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED) 2692 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 2693 if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED) 2694 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 2695 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) 2696 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, 
VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
}

void evergreen_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	evergreen_irq_ack(rdev);
	evergreen_disable_interrupt_state(rdev);
}

void evergreen_irq_suspend(struct radeon_device *rdev)
{
	evergreen_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not-overwritten vector (wptr + 16).  Hopefully
		 * this allows us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

int evergreen_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	unsigned long flags;
	bool queue_hotplug = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = evergreen_get_ih_wptr(rdev);
	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	spin_lock_irqsave(&rdev->ih.lock, flags);
	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
restart_ih:
	/* Order reading of wptr vs.
reading of IH ring data */ 2799 rmb(); 2800 2801 /* display interrupts */ 2802 evergreen_irq_ack(rdev); 2803 2804 rdev->ih.wptr = wptr; 2805 while (rptr != wptr) { 2806 /* wptr/rptr are in bytes! */ 2807 ring_index = rptr / 4; 2808 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; 2809 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; 2810 2811 switch (src_id) { 2812 case 1: /* D1 vblank/vline */ 2813 switch (src_data) { 2814 case 0: /* D1 vblank */ 2815 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { 2816 if (rdev->irq.crtc_vblank_int[0]) { 2817 drm_handle_vblank(rdev->ddev, 0); 2818 rdev->pm.vblank_sync = true; 2819 wake_up(&rdev->irq.vblank_queue); 2820 } 2821 if (rdev->irq.pflip[0]) 2822 radeon_crtc_handle_flip(rdev, 0); 2823 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; 2824 DRM_DEBUG("IH: D1 vblank\n"); 2825 } 2826 break; 2827 case 1: /* D1 vline */ 2828 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { 2829 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; 2830 DRM_DEBUG("IH: D1 vline\n"); 2831 } 2832 break; 2833 default: 2834 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 2835 break; 2836 } 2837 break; 2838 case 2: /* D2 vblank/vline */ 2839 switch (src_data) { 2840 case 0: /* D2 vblank */ 2841 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { 2842 if (rdev->irq.crtc_vblank_int[1]) { 2843 drm_handle_vblank(rdev->ddev, 1); 2844 rdev->pm.vblank_sync = true; 2845 wake_up(&rdev->irq.vblank_queue); 2846 } 2847 if (rdev->irq.pflip[1]) 2848 radeon_crtc_handle_flip(rdev, 1); 2849 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; 2850 DRM_DEBUG("IH: D2 vblank\n"); 2851 } 2852 break; 2853 case 1: /* D2 vline */ 2854 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { 2855 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; 2856 DRM_DEBUG("IH: D2 vline\n"); 2857 } 2858 break; 2859 default: 2860 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 2861 break; 2862 } 2863 break; 2864 case 3: /* D3 vblank/vline */ 2865 switch (src_data) { 2866 case 0: /* D3 vblank */ 2867 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { 2868 if (rdev->irq.crtc_vblank_int[2]) { 2869 drm_handle_vblank(rdev->ddev, 2); 2870 rdev->pm.vblank_sync = true; 2871 wake_up(&rdev->irq.vblank_queue); 2872 } 2873 if (rdev->irq.pflip[2]) 2874 radeon_crtc_handle_flip(rdev, 2); 2875 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; 2876 DRM_DEBUG("IH: D3 vblank\n"); 2877 } 2878 break; 2879 case 1: /* D3 vline */ 2880 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { 2881 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; 2882 DRM_DEBUG("IH: D3 vline\n"); 2883 } 2884 break; 2885 default: 2886 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 2887 break; 2888 } 2889 break; 2890 case 4: /* D4 vblank/vline */ 2891 switch (src_data) { 2892 case 0: /* D4 vblank */ 2893 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { 2894 if (rdev->irq.crtc_vblank_int[3]) { 2895 drm_handle_vblank(rdev->ddev, 3); 2896 rdev->pm.vblank_sync = true; 2897 wake_up(&rdev->irq.vblank_queue); 2898 } 2899 if (rdev->irq.pflip[3]) 2900 radeon_crtc_handle_flip(rdev, 3); 2901 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; 2902 DRM_DEBUG("IH: D4 vblank\n"); 2903 } 2904 break; 2905 case 1: /* D4 
vline */ 2906 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { 2907 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; 2908 DRM_DEBUG("IH: D4 vline\n"); 2909 } 2910 break; 2911 default: 2912 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 2913 break; 2914 } 2915 break; 2916 case 5: /* D5 vblank/vline */ 2917 switch (src_data) { 2918 case 0: /* D5 vblank */ 2919 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { 2920 if (rdev->irq.crtc_vblank_int[4]) { 2921 drm_handle_vblank(rdev->ddev, 4); 2922 rdev->pm.vblank_sync = true; 2923 wake_up(&rdev->irq.vblank_queue); 2924 } 2925 if (rdev->irq.pflip[4]) 2926 radeon_crtc_handle_flip(rdev, 4); 2927 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; 2928 DRM_DEBUG("IH: D5 vblank\n"); 2929 } 2930 break; 2931 case 1: /* D5 vline */ 2932 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { 2933 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; 2934 DRM_DEBUG("IH: D5 vline\n"); 2935 } 2936 break; 2937 default: 2938 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 2939 break; 2940 } 2941 break; 2942 case 6: /* D6 vblank/vline */ 2943 switch (src_data) { 2944 case 0: /* D6 vblank */ 2945 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { 2946 if (rdev->irq.crtc_vblank_int[5]) { 2947 drm_handle_vblank(rdev->ddev, 5); 2948 rdev->pm.vblank_sync = true; 2949 wake_up(&rdev->irq.vblank_queue); 2950 } 2951 if (rdev->irq.pflip[5]) 2952 radeon_crtc_handle_flip(rdev, 5); 2953 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; 2954 DRM_DEBUG("IH: D6 vblank\n"); 2955 } 2956 break; 2957 case 1: /* D6 vline */ 2958 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { 2959 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; 2960 DRM_DEBUG("IH: D6 vline\n"); 2961 } 2962 break; 2963 default: 2964 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 2965 break; 2966 } 2967 break; 2968 case 42: /* HPD hotplug */ 2969 switch (src_data) { 2970 case 0: 2971 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { 2972 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; 2973 queue_hotplug = true; 2974 DRM_DEBUG("IH: HPD1\n"); 2975 } 2976 break; 2977 case 1: 2978 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { 2979 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; 2980 queue_hotplug = true; 2981 DRM_DEBUG("IH: HPD2\n"); 2982 } 2983 break; 2984 case 2: 2985 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { 2986 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; 2987 queue_hotplug = true; 2988 DRM_DEBUG("IH: HPD3\n"); 2989 } 2990 break; 2991 case 3: 2992 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { 2993 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; 2994 queue_hotplug = true; 2995 DRM_DEBUG("IH: HPD4\n"); 2996 } 2997 break; 2998 case 4: 2999 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { 3000 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; 3001 queue_hotplug = true; 3002 DRM_DEBUG("IH: HPD5\n"); 3003 } 3004 break; 3005 case 5: 3006 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { 3007 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; 3008 queue_hotplug = true; 3009 DRM_DEBUG("IH: HPD6\n"); 
3010 } 3011 break; 3012 default: 3013 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3014 break; 3015 } 3016 break; 3017 case 176: /* CP_INT in ring buffer */ 3018 case 177: /* CP_INT in IB1 */ 3019 case 178: /* CP_INT in IB2 */ 3020 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); 3021 radeon_fence_process(rdev); 3022 break; 3023 case 181: /* CP EOP event */ 3024 DRM_DEBUG("IH: CP EOP\n"); 3025 radeon_fence_process(rdev); 3026 break; 3027 case 233: /* GUI IDLE */ 3028 DRM_DEBUG("IH: GUI idle\n"); 3029 rdev->pm.gui_idle = true; 3030 wake_up(&rdev->irq.idle_queue); 3031 break; 3032 default: 3033 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3034 break; 3035 } 3036 3037 /* wptr/rptr are in bytes! */ 3038 rptr += 16; 3039 rptr &= rdev->ih.ptr_mask; 3040 } 3041 /* make sure wptr hasn't changed while processing */ 3042 wptr = evergreen_get_ih_wptr(rdev); 3043 if (wptr != rdev->ih.wptr) 3044 goto restart_ih; 3045 if (queue_hotplug) 3046 schedule_work(&rdev->hotplug_work); 3047 rdev->ih.rptr = rptr; 3048 WREG32(IH_RB_RPTR, rdev->ih.rptr); 3049 spin_unlock_irqrestore(&rdev->ih.lock, flags); 3050 return IRQ_HANDLED; 3051 } 3052 3053 static int evergreen_startup(struct radeon_device *rdev) 3054 { 3055 int r; 3056 3057 /* enable pcie gen2 link */ 3058 evergreen_pcie_gen2_enable(rdev); 3059 3060 if (ASIC_IS_DCE5(rdev)) { 3061 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { 3062 r = ni_init_microcode(rdev); 3063 if (r) { 3064 DRM_ERROR("Failed to load firmware!\n"); 3065 return r; 3066 } 3067 } 3068 r = ni_mc_load_microcode(rdev); 3069 if (r) { 3070 DRM_ERROR("Failed to load MC firmware!\n"); 3071 return r; 3072 } 3073 } else { 3074 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 3075 r = r600_init_microcode(rdev); 3076 if (r) { 3077 DRM_ERROR("Failed to load firmware!\n"); 3078 return r; 3079 } 3080 } 3081 } 3082 3083 r = r600_vram_scratch_init(rdev); 3084 if (r) 3085 return r; 3086 3087 evergreen_mc_program(rdev); 3088 if (rdev->flags & RADEON_IS_AGP) { 3089 evergreen_agp_enable(rdev); 3090 } else { 3091 r = evergreen_pcie_gart_enable(rdev); 3092 if (r) 3093 return r; 3094 } 3095 evergreen_gpu_init(rdev); 3096 3097 r = evergreen_blit_init(rdev); 3098 if (r) { 3099 r600_blit_fini(rdev); 3100 rdev->asic->copy = NULL; 3101 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 3102 } 3103 3104 /* allocate wb buffer */ 3105 r = radeon_wb_init(rdev); 3106 if (r) 3107 return r; 3108 3109 /* Enable IRQ */ 3110 r = r600_irq_init(rdev); 3111 if (r) { 3112 DRM_ERROR("radeon: IH init failed (%d).\n", r); 3113 radeon_irq_kms_fini(rdev); 3114 return r; 3115 } 3116 evergreen_irq_set(rdev); 3117 3118 r = radeon_ring_init(rdev, rdev->cp.ring_size); 3119 if (r) 3120 return r; 3121 r = evergreen_cp_load_microcode(rdev); 3122 if (r) 3123 return r; 3124 r = evergreen_cp_resume(rdev); 3125 if (r) 3126 return r; 3127 3128 return 0; 3129 } 3130 3131 int evergreen_resume(struct radeon_device *rdev) 3132 { 3133 int r; 3134 3135 /* reset the asic, the gfx blocks are often in a bad state 3136 * after the driver is unloaded or after a resume 3137 */ 3138 if (radeon_asic_reset(rdev)) 3139 dev_warn(rdev->dev, "GPU reset failed !\n"); 3140 /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, 3141 * posting will perform necessary task to bring back GPU into good 3142 * shape. 
3143 */ 3144 /* post card */ 3145 atom_asic_init(rdev->mode_info.atom_context); 3146 3147 r = evergreen_startup(rdev); 3148 if (r) { 3149 DRM_ERROR("evergreen startup failed on resume\n"); 3150 return r; 3151 } 3152 3153 r = r600_ib_test(rdev); 3154 if (r) { 3155 DRM_ERROR("radeon: failed testing IB (%d).\n", r); 3156 return r; 3157 } 3158 3159 return r; 3160 3161 } 3162 3163 int evergreen_suspend(struct radeon_device *rdev) 3164 { 3165 /* FIXME: we should wait for ring to be empty */ 3166 r700_cp_stop(rdev); 3167 rdev->cp.ready = false; 3168 evergreen_irq_suspend(rdev); 3169 radeon_wb_disable(rdev); 3170 evergreen_pcie_gart_disable(rdev); 3171 r600_blit_suspend(rdev); 3172 3173 return 0; 3174 } 3175 3176 /* Plan is to move initialization in that function and use 3177 * helper function so that radeon_device_init pretty much 3178 * do nothing more than calling asic specific function. This 3179 * should also allow to remove a bunch of callback function 3180 * like vram_info. 3181 */ 3182 int evergreen_init(struct radeon_device *rdev) 3183 { 3184 int r; 3185 3186 /* This don't do much */ 3187 r = radeon_gem_init(rdev); 3188 if (r) 3189 return r; 3190 /* Read BIOS */ 3191 if (!radeon_get_bios(rdev)) { 3192 if (ASIC_IS_AVIVO(rdev)) 3193 return -EINVAL; 3194 } 3195 /* Must be an ATOMBIOS */ 3196 if (!rdev->is_atom_bios) { 3197 dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n"); 3198 return -EINVAL; 3199 } 3200 r = radeon_atombios_init(rdev); 3201 if (r) 3202 return r; 3203 /* reset the asic, the gfx blocks are often in a bad state 3204 * after the driver is unloaded or after a resume 3205 */ 3206 if (radeon_asic_reset(rdev)) 3207 dev_warn(rdev->dev, "GPU reset failed !\n"); 3208 /* Post card if necessary */ 3209 if (!radeon_card_posted(rdev)) { 3210 if (!rdev->bios) { 3211 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 3212 return -EINVAL; 3213 } 3214 DRM_INFO("GPU not posted. 
posting now...\n"); 3215 atom_asic_init(rdev->mode_info.atom_context); 3216 } 3217 /* Initialize scratch registers */ 3218 r600_scratch_init(rdev); 3219 /* Initialize surface registers */ 3220 radeon_surface_init(rdev); 3221 /* Initialize clocks */ 3222 radeon_get_clock_info(rdev->ddev); 3223 /* Fence driver */ 3224 r = radeon_fence_driver_init(rdev); 3225 if (r) 3226 return r; 3227 /* initialize AGP */ 3228 if (rdev->flags & RADEON_IS_AGP) { 3229 r = radeon_agp_init(rdev); 3230 if (r) 3231 radeon_agp_disable(rdev); 3232 } 3233 /* initialize memory controller */ 3234 r = evergreen_mc_init(rdev); 3235 if (r) 3236 return r; 3237 /* Memory manager */ 3238 r = radeon_bo_init(rdev); 3239 if (r) 3240 return r; 3241 3242 r = radeon_irq_kms_init(rdev); 3243 if (r) 3244 return r; 3245 3246 rdev->cp.ring_obj = NULL; 3247 r600_ring_init(rdev, 1024 * 1024); 3248 3249 rdev->ih.ring_obj = NULL; 3250 r600_ih_ring_init(rdev, 64 * 1024); 3251 3252 r = r600_pcie_gart_init(rdev); 3253 if (r) 3254 return r; 3255 3256 rdev->accel_working = true; 3257 r = evergreen_startup(rdev); 3258 if (r) { 3259 dev_err(rdev->dev, "disabling GPU acceleration\n"); 3260 r700_cp_fini(rdev); 3261 r600_irq_fini(rdev); 3262 radeon_wb_fini(rdev); 3263 radeon_irq_kms_fini(rdev); 3264 evergreen_pcie_gart_fini(rdev); 3265 rdev->accel_working = false; 3266 } 3267 if (rdev->accel_working) { 3268 r = radeon_ib_pool_init(rdev); 3269 if (r) { 3270 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); 3271 rdev->accel_working = false; 3272 } 3273 r = r600_ib_test(rdev); 3274 if (r) { 3275 DRM_ERROR("radeon: failed testing IB (%d).\n", r); 3276 rdev->accel_working = false; 3277 } 3278 } 3279 return 0; 3280 } 3281 3282 void evergreen_fini(struct radeon_device *rdev) 3283 { 3284 r600_blit_fini(rdev); 3285 r700_cp_fini(rdev); 3286 r600_irq_fini(rdev); 3287 radeon_wb_fini(rdev); 3288 radeon_ib_pool_fini(rdev); 3289 radeon_irq_kms_fini(rdev); 3290 evergreen_pcie_gart_fini(rdev); 3291 r600_vram_scratch_fini(rdev); 3292 radeon_gem_fini(rdev); 3293 radeon_fence_driver_fini(rdev); 3294 radeon_agp_fini(rdev); 3295 radeon_bo_fini(rdev); 3296 radeon_atombios_fini(rdev); 3297 kfree(rdev->bios); 3298 rdev->bios = NULL; 3299 } 3300 3301 void evergreen_pcie_gen2_enable(struct radeon_device *rdev) 3302 { 3303 u32 link_width_cntl, speed_cntl; 3304 3305 if (radeon_pcie_gen2 == 0) 3306 return; 3307 3308 if (rdev->flags & RADEON_IS_IGP) 3309 return; 3310 3311 if (!(rdev->flags & RADEON_IS_PCIE)) 3312 return; 3313 3314 /* x2 cards have a special sequence */ 3315 if (ASIC_IS_X2(rdev)) 3316 return; 3317 3318 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 3319 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) || 3320 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { 3321 3322 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 3323 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 3324 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 3325 3326 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 3327 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; 3328 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 3329 3330 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 3331 speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT; 3332 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 3333 3334 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 3335 speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT; 3336 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 3337 3338 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 3339 speed_cntl |= LC_GEN2_EN_STRAP; 3340 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 3341 
	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}