/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

#include "atom.h"
#include <asm/div64.h>

#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>

#include <linux/gcd.h>

static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUT_RW_MODE, 0);
	WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);

	WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
	for (i = 0; i < 256; i++) {
		WREG32(AVIVO_DC_LUT_30_COLOR,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}

	/* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
	WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
}

static void dce4_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN +
	       radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);

	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}
}

static void dce5_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);

	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
	WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
	       NI_GRPH_PRESCALE_BYPASS);
	WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
	       NI_OVL_PRESCALE_BYPASS);
	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));

	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);

	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}

	WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
	WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
	WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
	WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
	       (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset?
	 */
	WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
	if (ASIC_IS_DCE8(rdev)) {
		/* XXX this only needs to be programmed once per crtc at startup,
		 * not sure where the best place for it is
		 */
		WREG32(CIK_ALPHA_CONTROL + radeon_crtc->crtc_offset,
		       CIK_CURSOR_ALPHA_BLND_ENA);
	}
}

static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;
	uint32_t dac2_cntl;

	dac2_cntl = RREG32(RADEON_DAC_CNTL2);
	if (radeon_crtc->crtc_id == 0)
		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
	else
		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
	WREG32(RADEON_DAC_CNTL2, dac2_cntl);

	WREG8(RADEON_PALETTE_INDEX, 0);
	for (i = 0; i < 256; i++) {
		WREG32(RADEON_PALETTE_30_DATA,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}
}

void radeon_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;

	if (!crtc->enabled)
		return;

	if (ASIC_IS_DCE5(rdev))
		dce5_crtc_load_lut(crtc);
	else if (ASIC_IS_DCE4(rdev))
		dce4_crtc_load_lut(crtc);
	else if (ASIC_IS_AVIVO(rdev))
		avivo_crtc_load_lut(crtc);
	else
		legacy_crtc_load_lut(crtc);
}

/** Sets the color ramps on behalf of fbcon */
void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			      u16 blue, int regno)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	radeon_crtc->lut_r[regno] = red >> 6;
	radeon_crtc->lut_g[regno] = green >> 6;
	radeon_crtc->lut_b[regno] = blue >> 6;
}

/** Gets the color ramps on behalf of fbcon */
void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			      u16 *blue, int regno)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	*red = radeon_crtc->lut_r[regno] << 6;
	*green = radeon_crtc->lut_g[regno] << 6;
	*blue = radeon_crtc->lut_b[regno] << 6;
}

static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				  u16 *blue, uint32_t start, uint32_t size)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	int end = (start + size > 256) ? 256 : start + size, i;

	/* userspace palettes are always correct as is */
	for (i = start; i < end; i++) {
		radeon_crtc->lut_r[i] = red[i] >> 6;
		radeon_crtc->lut_g[i] = green[i] >> 6;
		radeon_crtc->lut_b[i] = blue[i] >> 6;
	}
	radeon_crtc_load_lut(crtc);
}
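/*
 * Note on the LUT format used above (a summary of the code, not new
 * behaviour): each of the 256 palette entries stores 10 bits per channel,
 * packed as red in bits 29:20, green in bits 19:10 and blue in bits 9:0.
 * fbcon and userspace hand in 16-bit ramps, which are truncated with ">> 6"
 * before being written out. For example, a mid-grey of 0x8000 becomes 0x200
 * per channel, i.e. the packed word (0x200 << 20) | (0x200 << 10) | 0x200.
 * The numbers are only illustrative.
 */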
static void radeon_crtc_destroy(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	drm_crtc_cleanup(crtc);
	destroy_workqueue(radeon_crtc->flip_queue);
	kfree(radeon_crtc);
}

/**
 * radeon_unpin_work_func - unpin old buffer object
 *
 * @__work - kernel work item
 *
 * Unpin the old frame buffer object outside of the interrupt handler
 */
static void radeon_unpin_work_func(struct work_struct *__work)
{
	struct radeon_flip_work *work =
		container_of(__work, struct radeon_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = radeon_bo_reserve(work->old_rbo, false);
	if (likely(r == 0)) {
		r = radeon_bo_unpin(work->old_rbo);
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to unpin buffer after flip\n");
		}
		radeon_bo_unreserve(work->old_rbo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
	kfree(work);
}

void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	unsigned long flags;
	u32 update_pending;
	int vpos, hpos;

	/* can happen during initialization */
	if (radeon_crtc == NULL)
		return;

	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
				 "RADEON_FLIP_SUBMITTED(%d)\n",
				 radeon_crtc->flip_status,
				 RADEON_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
		return;
	}

	update_pending = radeon_page_flip_pending(rdev, crtc_id);

	/* Has the pageflip already completed in crtc, or is it certain
	 * to complete in this vblank?
	 */
	if (update_pending &&
	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
							       &vpos, &hpos, NULL, NULL)) &&
	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
	     (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
		/* crtc didn't flip in this target vblank interval,
		 * but flip is pending in crtc. Based on the current
		 * scanout position we know that the current frame is
		 * (nearly) complete and the flip will (likely)
		 * complete before the start of the next frame.
		 */
		update_pending = 0;
	}
	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
	if (!update_pending)
		radeon_crtc_handle_flip(rdev, crtc_id);
}
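/*
 * Rough sketch of the flip state machine as implemented in this file:
 * radeon_crtc_page_flip() sets flip_status to RADEON_FLIP_PENDING and queues
 * radeon_flip_work_func(), which pins the new buffer, programs the flip and
 * moves the status to RADEON_FLIP_SUBMITTED. radeon_crtc_handle_vblank()
 * (above) then waits for the hardware to latch the new base address and
 * radeon_crtc_handle_flip() (below) sends the completion event, resets the
 * status to RADEON_FLIP_NONE and schedules the unpin of the old buffer.
 */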
/**
 * radeon_crtc_handle_flip - page flip completed
 *
 * @rdev: radeon device pointer
 * @crtc_id: crtc number this event is for
 *
 * Called when we are sure that a page flip for this crtc is completed.
 */
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	struct radeon_flip_work *work;
	unsigned long flags;

	/* this can happen at init */
	if (radeon_crtc == NULL)
		return;

	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
	work = radeon_crtc->flip_work;
	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
				 "RADEON_FLIP_SUBMITTED(%d)\n",
				 radeon_crtc->flip_status,
				 RADEON_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
		return;
	}

	/* Pageflip completed. Clean up. */
	radeon_crtc->flip_status = RADEON_FLIP_NONE;
	radeon_crtc->flip_work = NULL;

	/* wakeup userspace */
	if (work->event)
		drm_send_vblank_event(rdev->ddev, crtc_id, work->event);

	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);

	drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
	radeon_fence_unref(&work->fence);
	radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
	queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}

/**
 * radeon_flip_work_func - page flip framebuffer
 *
 * @__work - kernel work item
 *
 * Wait for the buffer object to become idle and do the actual page flip
 */
static void radeon_flip_work_func(struct work_struct *__work)
{
	struct radeon_flip_work *work =
		container_of(__work, struct radeon_flip_work, flip_work);
	struct radeon_device *rdev = work->rdev;
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &radeon_crtc->base;
	struct drm_framebuffer *fb = work->fb;

	uint32_t tiling_flags, pitch_pixels;
	uint64_t base;

	unsigned long flags;
	int r;

	down_read(&rdev->exclusive_lock);
	while (work->fence) {
		r = radeon_fence_wait(work->fence, false);
		if (r == -EDEADLK) {
			up_read(&rdev->exclusive_lock);
			r = radeon_gpu_reset(rdev);
			down_read(&rdev->exclusive_lock);
		}

		if (r) {
			DRM_ERROR("failed to wait on page flip fence (%d)!\n",
				  r);
			goto cleanup;
		} else
			radeon_fence_unref(&work->fence);
	}

	/* pin the new buffer */
	DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
			 work->old_rbo, work->new_rbo);

	r = radeon_bo_reserve(work->new_rbo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
		goto cleanup;
	}
	/* Only 27 bit offset for legacy CRTC */
	r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
				     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
	if (unlikely(r != 0)) {
		radeon_bo_unreserve(work->new_rbo);
		r = -EINVAL;
		DRM_ERROR("failed to pin new rbo buffer before flip\n");
		goto cleanup;
	}
	radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
	radeon_bo_unreserve(work->new_rbo);

	if (!ASIC_IS_AVIVO(rdev)) {
		/* crtc offset is from display base addr not FB location */
		base -= radeon_crtc->legacy_display_base_addr;
		pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);

		if (tiling_flags & RADEON_TILING_MACRO) {
			if (ASIC_IS_R300(rdev)) {
				base &= ~0x7ff;
			} else {
				int byteshift = fb->bits_per_pixel >> 4;
				int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
				base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
			}
		} else {
			int offset = crtc->y * pitch_pixels + crtc->x;
			switch (fb->bits_per_pixel) {
			case 8:
			default:
				offset *= 1;
				break;
			case 15:
			case 16:
				offset *= 2;
				break;
			case 24:
				offset *= 3;
				break;
			case 32:
				offset *= 4;
				break;
			}
			base += offset;
		}
		base &= ~7;
	}

	r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
	if (r) {
		DRM_ERROR("failed to get vblank before flip\n");
		goto pflip_cleanup;
	}

	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* set the proper interrupt */
	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);

	/* do the flip (mmio) */
	radeon_page_flip(rdev, radeon_crtc->crtc_id, base);

	radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	up_read(&rdev->exclusive_lock);

	return;

pflip_cleanup:
	if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) {
		DRM_ERROR("failed to reserve new rbo in error path\n");
		goto cleanup;
	}
	if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) {
		DRM_ERROR("failed to unpin new rbo in error path\n");
	}
	radeon_bo_unreserve(work->new_rbo);

cleanup:
	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
	radeon_fence_unref(&work->fence);
	kfree(work);
	up_read(&rdev->exclusive_lock);
}
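/*
 * Worked example for the legacy (pre-AVIVO) scanout offset computed above,
 * with made-up numbers: for a linear 32bpp framebuffer with
 * pitches[0] = 7680 bytes, pitch_pixels = 7680 / 4 = 1920; panning to
 * (x, y) = (8, 10) gives offset = (10 * 1920 + 8) * 4 = 76832 bytes, which
 * is then added to the pinned base and aligned down to 8 bytes by
 * "base &= ~7". The tiled path instead derives a tile-granular address from
 * the same x/y. These figures are only illustrative.
 */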
static int radeon_crtc_page_flip(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_pending_vblank_event *event,
				 uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_framebuffer *old_radeon_fb;
	struct radeon_framebuffer *new_radeon_fb;
	struct drm_gem_object *obj;
	struct radeon_flip_work *work;
	unsigned long flags;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_WORK(&work->flip_work, radeon_flip_work_func);
	INIT_WORK(&work->unpin_work, radeon_unpin_work_func);

	work->rdev = rdev;
	work->crtc_id = radeon_crtc->crtc_id;
	work->fb = fb;
	work->event = event;

	/* schedule unpin of the old buffer */
	old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
	obj = old_radeon_fb->obj;

	/* take a reference to the old object */
	drm_gem_object_reference(obj);
	work->old_rbo = gem_to_radeon_bo(obj);

	new_radeon_fb = to_radeon_framebuffer(fb);
	obj = new_radeon_fb->obj;
	work->new_rbo = gem_to_radeon_bo(obj);

	spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
	if (work->new_rbo->tbo.sync_obj)
		work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
	spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);

	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
		radeon_fence_unref(&work->fence);
		kfree(work);
		return -EBUSY;
	}
	radeon_crtc->flip_status = RADEON_FLIP_PENDING;
	radeon_crtc->flip_work = work;

	/* update crtc fb */
	crtc->primary->fb = fb;

	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	queue_work(radeon_crtc->flip_queue, &work->flip_work);

	return 0;
}

static int
radeon_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_crtc_helper_set_config(set);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	rdev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	   take the current one */
	if (active && !rdev->have_disp_power_ref) {
		rdev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	   we got before */
	if (!active && rdev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		rdev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const struct drm_crtc_funcs radeon_crtc_funcs = {
	.cursor_set = radeon_crtc_cursor_set,
	.cursor_move = radeon_crtc_cursor_move,
	.gamma_set = radeon_crtc_gamma_set,
	.set_config = radeon_crtc_set_config,
	.destroy = radeon_crtc_destroy,
	.page_flip = radeon_crtc_page_flip,
};

static void radeon_crtc_init(struct drm_device *dev, int index)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc;
	int i;

	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (radeon_crtc == NULL)
		return;

	drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
	radeon_crtc->crtc_id = index;
	radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc");
	rdev->mode_info.crtcs[index] = radeon_crtc;

	if (rdev->family >= CHIP_BONAIRE) {
		radeon_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
		radeon_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
	} else {
		radeon_crtc->max_cursor_width = CURSOR_WIDTH;
		radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
	}
	dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
	dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;

#if 0
	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
	radeon_crtc->mode_set.num_connectors = 0;
#endif

	for (i = 0; i < 256; i++) {
		radeon_crtc->lut_r[i] = i << 2;
		radeon_crtc->lut_g[i] = i << 2;
		radeon_crtc->lut_b[i] = i << 2;
	}

	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
		radeon_atombios_init_crtc(dev, radeon_crtc);
	else
		radeon_legacy_init_crtc(dev, radeon_crtc);
}

static const char *encoder_names[38] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

static void radeon_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	struct drm_encoder *encoder;
	struct radeon_encoder *radeon_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("Radeon Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		radeon_connector = to_radeon_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO(" %s\n", connector->name);
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
		if (radeon_connector->ddc_bus) {
			DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 radeon_connector->ddc_bus->rec.mask_clk_reg,
				 radeon_connector->ddc_bus->rec.mask_data_reg,
				 radeon_connector->ddc_bus->rec.a_clk_reg,
				 radeon_connector->ddc_bus->rec.a_data_reg,
				 radeon_connector->ddc_bus->rec.en_clk_reg,
				 radeon_connector->ddc_bus->rec.en_data_reg,
				 radeon_connector->ddc_bus->rec.y_clk_reg,
				 radeon_connector->ddc_bus->rec.y_data_reg);
			if (radeon_connector->router.ddc_valid)
				DRM_INFO(" DDC Router 0x%x/0x%x\n",
					 radeon_connector->router.ddc_mux_control_pin,
					 radeon_connector->router.ddc_mux_state);
			if (radeon_connector->router.cd_valid)
				DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
					 radeon_connector->router.cd_mux_control_pin,
					 radeon_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO(" Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			radeon_encoder = to_radeon_encoder(encoder);
			devices = radeon_encoder->devices & radeon_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO(" CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO(" CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO(" LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO(" DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO(" DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO(" DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO(" CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
			}
		}
		i++;
	}
}

static bool radeon_setup_enc_conn(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	bool ret = false;

	if (rdev->bios) {
		if (rdev->is_atom_bios) {
			ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
			if (ret == false)
				ret = radeon_get_atom_connector_info_from_object_table(dev);
		} else {
			ret = radeon_get_legacy_connector_info_from_bios(dev);
			if (ret == false)
				ret = radeon_get_legacy_connector_info_from_table(dev);
		}
	} else {
		if (!ASIC_IS_AVIVO(rdev))
			ret = radeon_get_legacy_connector_info_from_table(dev);
	}
	if (ret) {
		radeon_setup_encoder_clones(dev);
		radeon_print_display_setup(dev);
	}

	return ret;
}
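/*
 * Summary of the EDID lookup order implemented by radeon_ddc_get_modes()
 * below (descriptive only): connectors routed through a DP bridge and real
 * DP/eDP sinks are probed over the DP AUX channel, everything else over the
 * regular DDC i2c adapter, and if no EDID is found the driver falls back to
 * a hardcoded EDID from the video BIOS where one is provided (some laptop
 * panels and server KVMs).
 */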
int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
	struct drm_device *dev = radeon_connector->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	int ret = 0;

	/* on hw with routers, select right port */
	if (radeon_connector->router.ddc_valid)
		radeon_router_select_ddc_port(radeon_connector);

	if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
	    ENCODER_OBJECT_ID_NONE) {
		if (radeon_connector->ddc_bus->has_aux)
			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
							      &radeon_connector->ddc_bus->aux.ddc);
	} else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
		   (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;

		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
		    radeon_connector->ddc_bus->has_aux)
			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
							      &radeon_connector->ddc_bus->aux.ddc);
		else if (radeon_connector->ddc_bus && !radeon_connector->edid)
			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
							      &radeon_connector->ddc_bus->adapter);
	} else {
		if (radeon_connector->ddc_bus && !radeon_connector->edid)
			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
							      &radeon_connector->ddc_bus->adapter);
	}

	if (!radeon_connector->edid) {
		if (rdev->is_atom_bios) {
			/* some laptops provide a hardcoded edid in rom for LCDs */
			if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
			     (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
				radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
		} else
			/* some servers provide a hardcoded edid in rom for KVMs */
			radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
	}
	if (radeon_connector->edid) {
		drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
		ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
		drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
		return ret;
	}
	drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
	return 0;
}

/* avivo */

/**
 * avivo_reduce_ratio - fractional number reduction
 *
 * @nom: numerator
 * @den: denominator
 * @nom_min: minimum value for numerator
 * @den_min: minimum value for denominator
 *
 * Find the greatest common divisor and apply it on both numerator and
 * denominator, but make sure that numerator and denominator are at least
 * as large as their minimum values.
 */
static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
			       unsigned nom_min, unsigned den_min)
{
	unsigned tmp;

	/* reduce the numbers to a simpler ratio */
	tmp = gcd(*nom, *den);
	*nom /= tmp;
	*den /= tmp;

	/* make sure the numerator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}

	/* make sure the denominator is large enough */
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}
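/*
 * Illustrative example for avivo_reduce_ratio() above (made-up values):
 * starting from nom = 165000 and den = 10000, gcd() reduces the pair to
 * 33/2. With nom_min = 4 and den_min = 4, the denominator is then scaled
 * up by DIV_ROUND_UP(4, 2) = 2, giving 66/4, which preserves the ratio
 * while meeting both minimums.
 */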
/**
 * avivo_get_fb_ref_div - feedback and ref divider calculation
 *
 * @nom: numerator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate feedback and reference divider for a given post divider. Makes
 * sure we stay within the limits.
 */
static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
				 unsigned fb_div_max, unsigned ref_div_max,
				 unsigned *fb_div, unsigned *ref_div)
{
	/* limit reference * post divider to a maximum */
	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);

	/* get matching reference and feedback divider */
	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}
}

/**
 * radeon_compute_pll_avivo - compute PLL parameters
 *
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
void radeon_compute_pll_avivo(struct radeon_pll *pll,
			      u32 freq,
			      u32 *dot_clock_p,
			      u32 *fb_div_p,
			      u32 *frac_fb_div_p,
			      u32 *ref_div_p,
			      u32 *post_div_p)
{
	unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine allowed ref divider range */
	if (pll->flags & RADEON_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & RADEON_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else
		ref_div_max = pll->max_ref_div;

	/* determine allowed post divider range */
	if (pll->flags & RADEON_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & RADEON_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}

	/* represent the searched ratio as fractional number */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	avivo_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* now search for a post divider */
	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;

	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;
		avivo_get_fb_ref_div(nom, den, post_div, fb_div_max,
				     ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
		    !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {

			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* get the feedback and reference divider for the optimal value */
	avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
			     &fb_div, &ref_div);

	/* reduce the numbers to a simpler ratio once more */
	/* this also makes sure that the reference divider is large enough */
	avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* avoid high jitter with small fractional dividers */
	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}

	/* and finally save the result */
	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}
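/*
 * Worked example for the divider relation used above, with made-up values:
 * dot_clock = (ref_freq * fb_div) / (ref_div * post_div), so a reference of
 * 100 units with fb_div = 81, ref_div = 3 and post_div = 3 yields
 * 100 * 81 / (3 * 3) = 900 units, i.e. 9x the reference. The actual limits
 * (vco range, divider min/max) come from the radeon_pll struct and are
 * enforced by the search loop above; the numbers here are only illustrative.
 */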
/* pre-avivo */
static inline uint32_t radeon_div(uint64_t n, uint32_t d)
{
	uint64_t mod;

	n += d / 2;

	mod = do_div(n, d);
	return n;
}

void radeon_compute_pll_legacy(struct radeon_pll *pll,
			       uint64_t freq,
			       uint32_t *dot_clock_p,
			       uint32_t *fb_div_p,
			       uint32_t *frac_fb_div_p,
			       uint32_t *ref_div_p,
			       uint32_t *post_div_p)
{
	uint32_t min_ref_div = pll->min_ref_div;
	uint32_t max_ref_div = pll->max_ref_div;
	uint32_t min_post_div = pll->min_post_div;
	uint32_t max_post_div = pll->max_post_div;
	uint32_t min_fractional_feed_div = 0;
	uint32_t max_fractional_feed_div = 0;
	uint32_t best_vco = pll->best_vco;
	uint32_t best_post_div = 1;
	uint32_t best_ref_div = 1;
	uint32_t best_feedback_div = 1;
	uint32_t best_frac_feedback_div = 0;
	uint32_t best_freq = -1;
	uint32_t best_error = 0xffffffff;
	uint32_t best_vco_diff = 1;
	uint32_t post_div;
	u32 pll_out_min, pll_out_max;

	DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
	freq = freq * 1000;

	if (pll->flags & RADEON_PLL_IS_LCD) {
		pll_out_min = pll->lcd_pll_out_min;
		pll_out_max = pll->lcd_pll_out_max;
	} else {
		pll_out_min = pll->pll_out_min;
		pll_out_max = pll->pll_out_max;
	}

	if (pll_out_min > 64800)
		pll_out_min = 64800;

	if (pll->flags & RADEON_PLL_USE_REF_DIV)
		min_ref_div = max_ref_div = pll->reference_div;
	else {
		while (min_ref_div < max_ref_div-1) {
			uint32_t mid = (min_ref_div + max_ref_div) / 2;
			uint32_t pll_in = pll->reference_freq / mid;
			if (pll_in < pll->pll_in_min)
				max_ref_div = mid;
			else if (pll_in > pll->pll_in_max)
				min_ref_div = mid;
			else
				break;
		}
	}

	if (pll->flags & RADEON_PLL_USE_POST_DIV)
		min_post_div = max_post_div = pll->post_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		min_fractional_feed_div = pll->min_frac_feedback_div;
		max_fractional_feed_div = pll->max_frac_feedback_div;
	}

	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
		uint32_t ref_div;

		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
			continue;

		/* legacy radeons only have a few post_divs */
		if (pll->flags & RADEON_PLL_LEGACY) {
			if ((post_div == 5) ||
			    (post_div == 7) ||
			    (post_div == 9) ||
			    (post_div == 10) ||
			    (post_div == 11) ||
			    (post_div == 13) ||
			    (post_div == 14) ||
			    (post_div == 15))
				continue;
		}

		for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
			uint32_t feedback_div, current_freq = 0, error, vco_diff;
			uint32_t pll_in = pll->reference_freq / ref_div;
			uint32_t min_feed_div = pll->min_feedback_div;
			uint32_t max_feed_div = pll->max_feedback_div + 1;

			if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
				continue;

			while (min_feed_div < max_feed_div) {
				uint32_t vco;
				uint32_t min_frac_feed_div = min_fractional_feed_div;
				uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
				uint32_t frac_feedback_div;
				uint64_t tmp;

				feedback_div = (min_feed_div + max_feed_div) / 2;

				tmp = (uint64_t)pll->reference_freq * feedback_div;
				vco = radeon_div(tmp, ref_div);

				if (vco < pll_out_min) {
					min_feed_div = feedback_div + 1;
					continue;
				} else if (vco > pll_out_max) {
					max_feed_div = feedback_div;
					continue;
				}

				while (min_frac_feed_div < max_frac_feed_div) {
					frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
					tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
					tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
					current_freq = radeon_div(tmp, ref_div * post_div);

					if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
						if (freq < current_freq)
							error = 0xffffffff;
						else
							error = freq - current_freq;
					} else
						error = abs(current_freq - freq);
					vco_diff = abs(vco - best_vco);

					if ((best_vco == 0 && error < best_error) ||
					    (best_vco != 0 &&
					     ((best_error > 100 && error < best_error - 100) ||
					      (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
						best_post_div = post_div;
						best_ref_div = ref_div;
						best_feedback_div = feedback_div;
						best_frac_feedback_div = frac_feedback_div;
						best_freq = current_freq;
						best_error = error;
						best_vco_diff = vco_diff;
					} else if (current_freq == freq) {
						if (best_freq == -1) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) &&
							    (feedback_div < best_feedback_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						}
					}
					if (current_freq < freq)
						min_frac_feed_div = frac_feedback_div + 1;
					else
						max_frac_feed_div = frac_feedback_div;
				}
				if (current_freq < freq)
					min_feed_div = feedback_div + 1;
				else
					max_feed_div = feedback_div;
			}
		}
	}

	*dot_clock_p = best_freq / 10000;
	*fb_div_p = best_feedback_div;
	*frac_fb_div_p = best_frac_feedback_div;
	*ref_div_p = best_ref_div;
	*post_div_p = best_post_div;
	DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      (long long)freq,
		      best_freq / 1000, best_feedback_div, best_frac_feedback_div,
		      best_ref_div, best_post_div);

}

static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);

	if (radeon_fb->obj) {
		drm_gem_object_unreference_unlocked(radeon_fb->obj);
	}
	drm_framebuffer_cleanup(fb);
	kfree(radeon_fb);
}

static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);

	return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
}

static const struct drm_framebuffer_funcs radeon_fb_funcs = {
	.destroy = radeon_user_framebuffer_destroy,
	.create_handle = radeon_user_framebuffer_create_handle,
};

int
radeon_framebuffer_init(struct drm_device *dev,
			struct radeon_framebuffer *rfb,
			struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *obj)
{
	int ret;
	rfb->obj = obj;
	drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
	if (ret) {
		rfb->obj = NULL;
		return ret;
	}
	return 0;
}

static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *file_priv,
			       struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct radeon_framebuffer *radeon_fb;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
			"can't create framebuffer\n", mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
	if (radeon_fb == NULL) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
	if (ret) {
		kfree(radeon_fb);
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return &radeon_fb->base;
}

static void radeon_output_poll_changed(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	radeon_fb_output_poll_changed(rdev);
}

static const struct drm_mode_config_funcs radeon_mode_funcs = {
	.fb_create = radeon_user_framebuffer_create,
	.output_poll_changed = radeon_output_poll_changed
};

static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
{	{ 0, "driver" },
	{ 1, "bios" },
};

static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
{	{ TV_STD_NTSC, "ntsc" },
	{ TV_STD_PAL, "pal" },
	{ TV_STD_PAL_M, "pal-m" },
	{ TV_STD_PAL_60, "pal-60" },
	{ TV_STD_NTSC_J, "ntsc-j" },
	{ TV_STD_SCART_PAL, "scart-pal" },
	{ TV_STD_PAL_CN, "pal-cn" },
	{ TV_STD_SECAM, "secam" },
};

static struct drm_prop_enum_list radeon_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static struct drm_prop_enum_list radeon_audio_enum_list[] =
{	{ RADEON_AUDIO_DISABLE, "off" },
	{ RADEON_AUDIO_ENABLE, "on" },
	{ RADEON_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static struct drm_prop_enum_list radeon_dither_enum_list[] =
{	{ RADEON_FMT_DITHER_DISABLE, "off" },
	{ RADEON_FMT_DITHER_ENABLE, "on" },
};

static int radeon_modeset_create_props(struct radeon_device *rdev)
{
	int sz;

	if (rdev->is_atom_bios) {
		rdev->mode_info.coherent_mode_property =
			drm_property_create_range(rdev->ddev, 0, "coherent", 0, 1);
		if (!rdev->mode_info.coherent_mode_property)
			return -ENOMEM;
	}

	if (!ASIC_IS_AVIVO(rdev)) {
		sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
		rdev->mode_info.tmds_pll_property =
			drm_property_create_enum(rdev->ddev, 0,
						 "tmds_pll",
						 radeon_tmds_pll_enum_list, sz);
	}

	rdev->mode_info.load_detect_property =
		drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
	if (!rdev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(rdev->ddev);

	sz = ARRAY_SIZE(radeon_tv_std_enum_list);
	rdev->mode_info.tv_std_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "tv standard",
					 radeon_tv_std_enum_list, sz);

	sz = ARRAY_SIZE(radeon_underscan_enum_list);
	rdev->mode_info.underscan_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "underscan",
					 radeon_underscan_enum_list, sz);

	rdev->mode_info.underscan_hborder_property =
		drm_property_create_range(rdev->ddev, 0,
					  "underscan hborder", 0, 128);
	if (!rdev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	rdev->mode_info.underscan_vborder_property =
		drm_property_create_range(rdev->ddev, 0,
					  "underscan vborder", 0, 128);
	if (!rdev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(radeon_audio_enum_list);
	rdev->mode_info.audio_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "audio",
					 radeon_audio_enum_list, sz);

	sz = ARRAY_SIZE(radeon_dither_enum_list);
	rdev->mode_info.dither_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "dither",
					 radeon_dither_enum_list, sz);

	return 0;
}

void radeon_update_display_priority(struct radeon_device *rdev)
{
	/* adjustment options for the display watermarks */
	if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
		/* set display priority to high for r3xx, rv515 chips
		 * this avoids flickering due to underflow to the
		 * display controllers during heavy acceleration.
		 * Don't force high on rs4xx igp chips as it seems to
		 * affect the sound card. See kernel bug 15982.
		 */
		if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
		    !(rdev->flags & RADEON_IS_IGP))
			rdev->disp_priority = 2;
		else
			rdev->disp_priority = 0;
	} else
		rdev->disp_priority = radeon_disp_priority;

}

/*
 * Allocate hdmi structs and determine register offsets
 */
static void radeon_afmt_init(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
		rdev->mode_info.afmt[i] = NULL;

	if (ASIC_IS_NODCE(rdev)) {
		/* nothing to do */
	} else if (ASIC_IS_DCE4(rdev)) {
		static uint32_t eg_offsets[] = {
			EVERGREEN_CRTC0_REGISTER_OFFSET,
			EVERGREEN_CRTC1_REGISTER_OFFSET,
			EVERGREEN_CRTC2_REGISTER_OFFSET,
			EVERGREEN_CRTC3_REGISTER_OFFSET,
			EVERGREEN_CRTC4_REGISTER_OFFSET,
			EVERGREEN_CRTC5_REGISTER_OFFSET,
			0x13830 - 0x7030,
		};
		int num_afmt;

		/* DCE8 has 7 audio blocks tied to DIG encoders */
		/* DCE6 has 6 audio blocks tied to DIG encoders */
		/* DCE4/5 has 6 audio blocks tied to DIG encoders */
		/* DCE4.1 has 2 audio blocks tied to DIG encoders */
		if (ASIC_IS_DCE8(rdev))
			num_afmt = 7;
		else if (ASIC_IS_DCE6(rdev))
			num_afmt = 6;
		else if (ASIC_IS_DCE5(rdev))
			num_afmt = 6;
		else if (ASIC_IS_DCE41(rdev))
			num_afmt = 2;
		else /* DCE4 */
			num_afmt = 6;

		BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
		for (i = 0; i < num_afmt; i++) {
			rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
			if (rdev->mode_info.afmt[i]) {
				rdev->mode_info.afmt[i]->offset = eg_offsets[i];
				rdev->mode_info.afmt[i]->id = i;
			}
		}
	} else if (ASIC_IS_DCE3(rdev)) {
		/* DCE3.x has 2 audio blocks tied to DIG encoders */
		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[0]) {
			rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
			rdev->mode_info.afmt[0]->id = 0;
		}
		rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[1]) {
			rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
			rdev->mode_info.afmt[1]->id = 1;
		}
	} else if (ASIC_IS_DCE2(rdev)) {
		/* DCE2 has at least 1 routable audio block */
		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[0]) {
			rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
			rdev->mode_info.afmt[0]->id = 0;
		}
		/* r6xx has 2 routable audio blocks */
		if (rdev->family >= CHIP_R600) {
			rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
			if (rdev->mode_info.afmt[1]) {
				rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
				rdev->mode_info.afmt[1]->id = 1;
			}
		}
	}
}

static void radeon_afmt_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
		kfree(rdev->mode_info.afmt[i]);
		rdev->mode_info.afmt[i] = NULL;
	}
}
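/*
 * Descriptive overview of radeon_modeset_init() below, as implemented in
 * this file: set up the DRM mode config limits, create driver properties,
 * register the i2c buses, initialize one radeon_crtc per hardware crtc,
 * build encoders/connectors from the BIOS tables, then bring up hotplug
 * detection, the audio/HDMI (afmt) blocks, fbdev emulation and output
 * polling. radeon_modeset_fini() tears these down in roughly reverse order.
 */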
int radeon_modeset_init(struct radeon_device *rdev)
{
	int i;
	int ret;

	drm_mode_config_init(rdev->ddev);
	rdev->mode_info.mode_config_initialized = true;

	rdev->ddev->mode_config.funcs = &radeon_mode_funcs;

	if (ASIC_IS_DCE5(rdev)) {
		rdev->ddev->mode_config.max_width = 16384;
		rdev->ddev->mode_config.max_height = 16384;
	} else if (ASIC_IS_AVIVO(rdev)) {
		rdev->ddev->mode_config.max_width = 8192;
		rdev->ddev->mode_config.max_height = 8192;
	} else {
		rdev->ddev->mode_config.max_width = 4096;
		rdev->ddev->mode_config.max_height = 4096;
	}

	rdev->ddev->mode_config.preferred_depth = 24;
	rdev->ddev->mode_config.prefer_shadow = 1;

	rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;

	ret = radeon_modeset_create_props(rdev);
	if (ret) {
		return ret;
	}

	/* init i2c buses */
	radeon_i2c_init(rdev);

	/* check combios for a valid hardcoded EDID - Sun servers */
	if (!rdev->is_atom_bios) {
		/* check for hardcoded EDID in BIOS */
		radeon_combios_check_hardcoded_edid(rdev);
	}

	/* allocate crtcs */
	for (i = 0; i < rdev->num_crtc; i++) {
		radeon_crtc_init(rdev->ddev, i);
	}

	/* okay we should have all the bios connectors */
	ret = radeon_setup_enc_conn(rdev->ddev);
	if (!ret) {
		return ret;
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
	}

	/* initialize hpd */
	radeon_hpd_init(rdev);

	/* setup afmt */
	radeon_afmt_init(rdev);

	radeon_fbdev_init(rdev);
	drm_kms_helper_poll_init(rdev->ddev);

	if (rdev->pm.dpm_enabled) {
		/* do dpm late init */
		ret = radeon_pm_late_init(rdev);
		if (ret) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
		/* set the dpm state for PX since there won't be
		 * a modeset to call this.
		 */
		radeon_pm_compute_clocks(rdev);
	}

	return 0;
}

void radeon_modeset_fini(struct radeon_device *rdev)
{
	radeon_fbdev_fini(rdev);
	kfree(rdev->mode_info.bios_hardcoded_edid);

	if (rdev->mode_info.mode_config_initialized) {
		radeon_afmt_fini(rdev);
		drm_kms_helper_poll_fini(rdev->ddev);
		radeon_hpd_fini(rdev);
		drm_mode_config_cleanup(rdev->ddev);
		rdev->mode_info.mode_config_initialized = false;
	}
	/* free i2c buses */
	radeon_i2c_fini(rdev);
}

static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}

bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *encoder;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_encoder *radeon_encoder;
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	bool first = true;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	radeon_crtc->h_border = 0;
	radeon_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		radeon_encoder = to_radeon_encoder(encoder);
		connector = radeon_get_connector_for_encoder(encoder);
		radeon_connector = to_radeon_connector(connector);

		if (first) {
			/* set scaling */
			if (radeon_encoder->rmx_type == RMX_OFF)
				radeon_crtc->rmx_type = RMX_OFF;
			else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
				 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
			else
				radeon_crtc->rmx_type = RMX_OFF;
			/* copy native mode */
			memcpy(&radeon_crtc->native_mode,
			       &radeon_encoder->native_mode,
			       sizeof(struct drm_display_mode));
			src_v = crtc->mode.vdisplay;
			dst_v = radeon_crtc->native_mode.vdisplay;
			src_h = crtc->mode.hdisplay;
			dst_h = radeon_crtc->native_mode.hdisplay;

			/* fix up for overscan on hdmi */
			if (ASIC_IS_AVIVO(rdev) &&
			    (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
			    ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
			     ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
			      drm_detect_hdmi_monitor(radeon_connector->edid) &&
			      is_hdtv_mode(mode)))) {
				if (radeon_encoder->underscan_hborder != 0)
					radeon_crtc->h_border = radeon_encoder->underscan_hborder;
				else
					radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
				if (radeon_encoder->underscan_vborder != 0)
					radeon_crtc->v_border = radeon_encoder->underscan_vborder;
				else
					radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
				radeon_crtc->rmx_type = RMX_FULL;
				src_v = crtc->mode.vdisplay;
				dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
				src_h = crtc->mode.hdisplay;
				dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
			}
			first = false;
		} else {
			if (radeon_crtc->rmx_type !=
	if (radeon_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;
		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		radeon_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		radeon_crtc->hsc.full = dfixed_div(a, b);
	} else {
		radeon_crtc->vsc.full = dfixed_const(1);
		radeon_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

/*
 * Retrieve the current video scanout position of a crtc on a given gpu, and
 * an optional accurate timestamp of when the query happened.
 *
 * \param dev Device to query.
 * \param crtc Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_INVBL = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that the returned position may be offset by a constant
 * but unknown small number of scanlines wrt. the real scanout position.
 */
int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
			       int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
{
	u32 stat_crtc = 0, vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct radeon_device *rdev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

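	/* Read the vblank start/end and current raster position registers
	 * for the requested crtc. The register layout differs per display
	 * block generation: DCE4+ has one register pair per crtc at a fixed
	 * per-crtc offset, AVIVO has dedicated D1/D2 registers, and
	 * pre-AVIVO parts encode the values differently (see below).
	 */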
	if (ASIC_IS_DCE4(rdev)) {
		if (crtc == 0) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC0_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC0_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 1) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC1_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC1_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 2) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC2_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC2_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 3) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC3_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC3_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 4) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC4_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC4_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 5) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC5_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC5_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else if (ASIC_IS_AVIVO(rdev)) {
		if (crtc == 0) {
			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 1) {
			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else {
		/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
		if (crtc == 0) {
			/* Assume vbl_end == 0, get vbl_start from
			 * upper 16 bits.
			 */
			vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
			       RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			/* Only retrieve vpos from upper 16 bits, set hpos == 0. */
			position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 1) {
			vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
			       RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries retrieved from the gpu? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
		vbl_end = 0;
	}

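	/* Example of the decode above and the correction below (illustrative
	 * values only): with vbl_start = 1080, vbl_end = 0 and vtotal = 1125,
	 * a raw vpos of 500 lies in the active area and is returned as-is,
	 * while a raw vpos of 1100 lies in vblank and is returned as
	 * 1100 - 1125 = -25, i.e. 25 lines until the end of vblank, with
	 * DRM_SCANOUTPOS_INVBL set.
	 */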
	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	/* Is vpos outside nominal vblank area, but less than
	 * 1/100 of a frame height away from start of vblank?
	 * If so, assume this isn't a massively delayed vblank
	 * interrupt, but a vblank interrupt that fired a few
	 * microseconds before true start of vblank. Compensate
	 * by adding a full frame duration to the final timestamp.
	 * Happens, e.g., on ATI R500, R600.
	 *
	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
	 */
	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
		vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
		vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;

		if (vbl_start - *vpos < vtotal / 100) {
			*vpos -= vtotal;

			/* Signal this correction as "applied". */
			ret |= 0x8;
		}
	}

	return ret;
}
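
/*
 * Illustrative numbers for the early-vblank-irq compensation at the end of
 * radeon_get_crtc_scanoutpos() above: with vtotal = 1125 and
 * vbl_start = 1080, a vblank irq that fires while vpos = 1075 is still
 * 5 lines (< 1125 / 100 = 11) short of vblank start; vpos is then reported
 * as 1075 - 1125 = -50 and bit 0x8 is set to mark the correction as
 * applied, effectively adding a full frame duration to the final timestamp.
 */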