/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

#include "atom.h"
#include <asm/div64.h>

#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>

#include <linux/gcd.h>

static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUT_RW_MODE, 0);
	WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);

	WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
	for (i = 0; i < 256; i++) {
		WREG32(AVIVO_DC_LUT_30_COLOR,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}

	/* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
	WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
}

static void dce4_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN +
	       radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);

	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}
}

static void dce5_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);

	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
	WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
	       NI_GRPH_PRESCALE_BYPASS);
	WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
	       NI_OVL_PRESCALE_BYPASS);
	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));

	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);

	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}

	WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
	WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
	WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
	WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
	       (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset?
	 */
	WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
	if (ASIC_IS_DCE8(rdev)) {
		/* XXX this only needs to be programmed once per crtc at startup,
		 * not sure where the best place for it is
		 */
		WREG32(CIK_ALPHA_CONTROL + radeon_crtc->crtc_offset,
		       CIK_CURSOR_ALPHA_BLND_ENA);
	}
}

static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;
	uint32_t dac2_cntl;

	dac2_cntl = RREG32(RADEON_DAC_CNTL2);
	if (radeon_crtc->crtc_id == 0)
		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
	else
		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
	WREG32(RADEON_DAC_CNTL2, dac2_cntl);

	WREG8(RADEON_PALETTE_INDEX, 0);
	for (i = 0; i < 256; i++) {
		WREG32(RADEON_PALETTE_30_DATA,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}
}

void radeon_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;

	if (!crtc->enabled)
		return;

	if (ASIC_IS_DCE5(rdev))
		dce5_crtc_load_lut(crtc);
	else if (ASIC_IS_DCE4(rdev))
		dce4_crtc_load_lut(crtc);
	else if (ASIC_IS_AVIVO(rdev))
		avivo_crtc_load_lut(crtc);
	else
		legacy_crtc_load_lut(crtc);
}

/** Sets the color ramps on behalf of fbcon */
void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			      u16 blue, int regno)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	radeon_crtc->lut_r[regno] = red >> 6;
	radeon_crtc->lut_g[regno] = green >> 6;
	radeon_crtc->lut_b[regno] = blue >> 6;
}

/** Gets the color ramps on behalf of fbcon */
void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			      u16 *blue, int regno)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	*red = radeon_crtc->lut_r[regno] << 6;
	*green = radeon_crtc->lut_g[regno] << 6;
	*blue = radeon_crtc->lut_b[regno] << 6;
}

static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				  u16 *blue, uint32_t start, uint32_t size)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	int end = (start + size > 256) ?
		256 : start + size, i;

	/* userspace palettes are always correct as is */
	for (i = start; i < end; i++) {
		radeon_crtc->lut_r[i] = red[i] >> 6;
		radeon_crtc->lut_g[i] = green[i] >> 6;
		radeon_crtc->lut_b[i] = blue[i] >> 6;
	}
	radeon_crtc_load_lut(crtc);
}

static void radeon_crtc_destroy(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	drm_crtc_cleanup(crtc);
	destroy_workqueue(radeon_crtc->flip_queue);
	kfree(radeon_crtc);
}

/**
 * radeon_unpin_work_func - unpin old buffer object
 *
 * @__work: kernel work item
 *
 * Unpin the old frame buffer object outside of the interrupt handler
 */
static void radeon_unpin_work_func(struct work_struct *__work)
{
	struct radeon_flip_work *work =
		container_of(__work, struct radeon_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = radeon_bo_reserve(work->old_rbo, false);
	if (likely(r == 0)) {
		r = radeon_bo_unpin(work->old_rbo);
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to unpin buffer after flip\n");
		}
		radeon_bo_unreserve(work->old_rbo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
	kfree(work);
}

void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	unsigned long flags;
	u32 update_pending;
	int vpos, hpos;

	/* can happen during initialization */
	if (radeon_crtc == NULL)
		return;

	/* Skip the pageflip completion check below (based on polling) on
	 * asics which reliably support hw pageflip completion irqs. pflip
	 * irqs are a reliable and race-free method of handling pageflip
	 * completion detection. A use_pflipirq module parameter < 2 allows
	 * overriding this in case of asics with faulty pflip irqs.
	 * A module parameter of 0 would only use this polling based path,
	 * a parameter of 1 would use pflip irq only as a backup to this
	 * path, as in Linux 3.16.
	 */
	if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
		return;

	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
				 "RADEON_FLIP_SUBMITTED(%d)\n",
				 radeon_crtc->flip_status,
				 RADEON_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
		return;
	}

	update_pending = radeon_page_flip_pending(rdev, crtc_id);

	/* Has the pageflip already completed in crtc, or is it certain
	 * to complete in this vblank?
	 */
	if (update_pending &&
	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
							       &vpos, &hpos, NULL, NULL)) &&
	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
	     (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
		/* crtc didn't flip in this target vblank interval,
		 * but flip is pending in crtc. Based on the current
		 * scanout position we know that the current frame is
		 * (nearly) complete and the flip will (likely)
		 * complete before the start of the next frame.
		 */
		update_pending = 0;
	}
	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
	if (!update_pending)
		radeon_crtc_handle_flip(rdev, crtc_id);
}

/**
 * radeon_crtc_handle_flip - page flip completed
 *
 * @rdev: radeon device pointer
 * @crtc_id: crtc number this event is for
 *
 * Called when we are sure that a page flip for this crtc is completed.
 */
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	struct radeon_flip_work *work;
	unsigned long flags;

	/* this can happen at init */
	if (radeon_crtc == NULL)
		return;

	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
	work = radeon_crtc->flip_work;
	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
				 "RADEON_FLIP_SUBMITTED(%d)\n",
				 radeon_crtc->flip_status,
				 RADEON_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
		return;
	}

	/* Pageflip completed. Clean up. */
	radeon_crtc->flip_status = RADEON_FLIP_NONE;
	radeon_crtc->flip_work = NULL;

	/* wakeup userspace */
	if (work->event)
		drm_send_vblank_event(rdev->ddev, crtc_id, work->event);

	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);

	drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
	radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
	queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}

/**
 * radeon_flip_work_func - page flip framebuffer
 *
 * @__work: kernel work item
 *
 * Wait for the buffer object to become idle and do the actual page flip
 */
static void radeon_flip_work_func(struct work_struct *__work)
{
	struct radeon_flip_work *work =
		container_of(__work, struct radeon_flip_work, flip_work);
	struct radeon_device *rdev = work->rdev;
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &radeon_crtc->base;
	unsigned long flags;
	int r;

	down_read(&rdev->exclusive_lock);
	if (work->fence) {
		r = radeon_fence_wait(work->fence, false);
		if (r == -EDEADLK) {
			up_read(&rdev->exclusive_lock);
			r = radeon_gpu_reset(rdev);
			down_read(&rdev->exclusive_lock);
		}
		if (r)
			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);

		/* We continue with the page flip even if we failed to wait on
		 * the fence, otherwise the DRM core and userspace will be
		 * confused about which BO the CRTC is scanning out
		 */

		radeon_fence_unref(&work->fence);
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* set the proper interrupt */
	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);

	/* do the flip (mmio) */
	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);

	radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	up_read(&rdev->exclusive_lock);
}

static int radeon_crtc_page_flip(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_pending_vblank_event *event,
				 uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_framebuffer *old_radeon_fb;
	struct radeon_framebuffer *new_radeon_fb;
	struct drm_gem_object *obj;
	struct radeon_flip_work *work;
	struct radeon_bo *new_rbo;
	uint32_t tiling_flags, pitch_pixels;
	uint64_t base;
	unsigned long flags;
	int r;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_WORK(&work->flip_work, radeon_flip_work_func);
	INIT_WORK(&work->unpin_work, radeon_unpin_work_func);

	work->rdev = rdev;
	work->crtc_id = radeon_crtc->crtc_id;
	work->event = event;

	/* schedule unpin of the old buffer */
	old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
	obj = old_radeon_fb->obj;

	/* take a reference to the old object */
	drm_gem_object_reference(obj);
	work->old_rbo = gem_to_radeon_bo(obj);

	new_radeon_fb = to_radeon_framebuffer(fb);
	obj = new_radeon_fb->obj;
	new_rbo = gem_to_radeon_bo(obj);

	spin_lock(&new_rbo->tbo.bdev->fence_lock);
	if (new_rbo->tbo.sync_obj)
		work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
	spin_unlock(&new_rbo->tbo.bdev->fence_lock);

	/* pin the new buffer */
	DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
			 work->old_rbo, new_rbo);

	r = radeon_bo_reserve(new_rbo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
		goto cleanup;
	}
	/* Only 27 bit offset for legacy CRTC */
	r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
				     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
	if (unlikely(r != 0)) {
		radeon_bo_unreserve(new_rbo);
		r = -EINVAL;
		DRM_ERROR("failed to pin new rbo buffer before flip\n");
		goto cleanup;
	}
	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
	radeon_bo_unreserve(new_rbo);

	if (!ASIC_IS_AVIVO(rdev)) {
		/* crtc offset is from display base addr not FB location */
		base -= radeon_crtc->legacy_display_base_addr;
		pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);

		if (tiling_flags & RADEON_TILING_MACRO) {
			if (ASIC_IS_R300(rdev)) {
				base &= ~0x7ff;
			} else {
				int byteshift = fb->bits_per_pixel >> 4;
				int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
				base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
			}
		} else {
			int offset = crtc->y * pitch_pixels + crtc->x;
			switch (fb->bits_per_pixel) {
			case 8:
			default:
				offset *= 1;
				break;
			case 15:
			case 16:
				offset *= 2;
				break;
			case 24:
				offset *= 3;
				break;
			case 32:
				offset *= 4;
				break;
			}
			base += offset;
		}
		base &= ~7;
	}
	work->base = base;

	r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
	if (r) {
		DRM_ERROR("failed to get vblank before flip\n");
		goto pflip_cleanup;
	}

	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto vblank_cleanup;
	}
	radeon_crtc->flip_status = RADEON_FLIP_PENDING;
	radeon_crtc->flip_work = work;

	/* update crtc fb */
	crtc->primary->fb = fb;

	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	queue_work(radeon_crtc->flip_queue, &work->flip_work);
	return 0;

vblank_cleanup:
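	/* error path: drop the vblank reference taken with drm_vblank_get() above */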
	drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);

pflip_cleanup:
	if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
		DRM_ERROR("failed to reserve new rbo in error path\n");
		goto cleanup;
	}
	if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
		DRM_ERROR("failed to unpin new rbo in error path\n");
	}
	radeon_bo_unreserve(new_rbo);

cleanup:
	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
	radeon_fence_unref(&work->fence);
	kfree(work);

	return r;
}

static int
radeon_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_crtc_helper_set_config(set);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	rdev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	   take the current one */
	if (active && !rdev->have_disp_power_ref) {
		rdev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	   we got before */
	if (!active && rdev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		rdev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const struct drm_crtc_funcs radeon_crtc_funcs = {
	.cursor_set = radeon_crtc_cursor_set,
	.cursor_move = radeon_crtc_cursor_move,
	.gamma_set = radeon_crtc_gamma_set,
	.set_config = radeon_crtc_set_config,
	.destroy = radeon_crtc_destroy,
	.page_flip = radeon_crtc_page_flip,
};

static void radeon_crtc_init(struct drm_device *dev, int index)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc;
	int i;

	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (radeon_crtc == NULL)
		return;

	drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
	radeon_crtc->crtc_id = index;
	radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc");
	rdev->mode_info.crtcs[index] = radeon_crtc;

	if (rdev->family >= CHIP_BONAIRE) {
		radeon_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
		radeon_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
	} else {
		radeon_crtc->max_cursor_width = CURSOR_WIDTH;
		radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
	}
	dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
	dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;

#if 0
	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
	radeon_crtc->mode_set.num_connectors = 0;
#endif

	for (i = 0; i < 256; i++) {
		radeon_crtc->lut_r[i] = i << 2;
		radeon_crtc->lut_g[i] = i << 2;
		radeon_crtc->lut_b[i] = i << 2;
	}

	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
		radeon_atombios_init_crtc(dev, radeon_crtc);
	else
		radeon_legacy_init_crtc(dev,
					radeon_crtc);
}

static const char *encoder_names[38] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

static void radeon_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	struct drm_encoder *encoder;
	struct radeon_encoder *radeon_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("Radeon Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		radeon_connector = to_radeon_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO(" %s\n", connector->name);
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
		if (radeon_connector->ddc_bus) {
			DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 radeon_connector->ddc_bus->rec.mask_clk_reg,
				 radeon_connector->ddc_bus->rec.mask_data_reg,
				 radeon_connector->ddc_bus->rec.a_clk_reg,
				 radeon_connector->ddc_bus->rec.a_data_reg,
				 radeon_connector->ddc_bus->rec.en_clk_reg,
				 radeon_connector->ddc_bus->rec.en_data_reg,
				 radeon_connector->ddc_bus->rec.y_clk_reg,
				 radeon_connector->ddc_bus->rec.y_data_reg);
			if (radeon_connector->router.ddc_valid)
				DRM_INFO(" DDC Router 0x%x/0x%x\n",
					 radeon_connector->router.ddc_mux_control_pin,
					 radeon_connector->router.ddc_mux_state);
			if (radeon_connector->router.cd_valid)
				DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
					 radeon_connector->router.cd_mux_control_pin,
					 radeon_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO(" Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			radeon_encoder = to_radeon_encoder(encoder);
			devices = radeon_encoder->devices & radeon_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO(" CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO(" CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO(" LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices &
				    ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO(" DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO(" DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO(" DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO(" CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
			}
		}
		i++;
	}
}

static bool radeon_setup_enc_conn(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	bool ret = false;

	if (rdev->bios) {
		if (rdev->is_atom_bios) {
			ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
			if (ret == false)
				ret = radeon_get_atom_connector_info_from_object_table(dev);
		} else {
			ret = radeon_get_legacy_connector_info_from_bios(dev);
			if (ret == false)
				ret = radeon_get_legacy_connector_info_from_table(dev);
		}
	} else {
		if (!ASIC_IS_AVIVO(rdev))
			ret = radeon_get_legacy_connector_info_from_table(dev);
	}
	if (ret) {
		radeon_setup_encoder_clones(dev);
		radeon_print_display_setup(dev);
	}

	return ret;
}

/* avivo */

/**
 * avivo_reduce_ratio - fractional number reduction
 *
 * @nom: nominator
 * @den: denominator
 * @nom_min: minimum value for nominator
 * @den_min: minimum value for denominator
 *
 * Find the greatest common divisor and apply it on both nominator and
 * denominator, but make sure nominator and denominator are at least as
 * large as their minimum values.
 */
static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
			       unsigned nom_min, unsigned den_min)
{
	unsigned tmp;

	/* reduce the numbers to a simpler ratio */
	tmp = gcd(*nom, *den);
	*nom /= tmp;
	*den /= tmp;

	/* make sure nominator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}

	/* make sure the denominator is large enough */
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}

/**
 * avivo_get_fb_ref_div - feedback and ref divider calculation
 *
 * @nom: nominator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate feedback and reference divider for a given post divider. Makes
 * sure we stay within the limits.
 */
static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
				 unsigned fb_div_max, unsigned ref_div_max,
				 unsigned *fb_div, unsigned *ref_div)
{
	/* limit reference * post divider to a maximum */
	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);

	/* get matching reference and feedback divider */
	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}
}

/**
 * radeon_compute_pll_avivo - compute PLL parameters
 *
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
void radeon_compute_pll_avivo(struct radeon_pll *pll,
			      u32 freq,
			      u32 *dot_clock_p,
			      u32 *fb_div_p,
			      u32 *frac_fb_div_p,
			      u32 *ref_div_p,
			      u32 *post_div_p)
{
	unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine allowed ref divider range */
	if (pll->flags & RADEON_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & RADEON_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else
		ref_div_max = pll->max_ref_div;

	/* determine allowed post divider range */
	if (pll->flags & RADEON_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & RADEON_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}

	/* represent the searched ratio as fractional number */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	avivo_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* now search for a post divider */
	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;

	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;
		avivo_get_fb_ref_div(nom, den, post_div, fb_div_max,
				     ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
		    !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {

			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* get the feedback and reference divider for the optimal value */
	avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
			     &fb_div, &ref_div);

	/* reduce the numbers to a simpler ratio once more */
	/* this also makes sure that the reference divider is large enough */
	avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* avoid high jitter with small fractional dividers */
	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}

	/* and finally save the result */
	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}

/* pre-avivo */
static inline uint32_t radeon_div(uint64_t n, uint32_t d)
{
	uint64_t mod;

	n += d / 2;

	mod = do_div(n, d);
	return n;
}

void radeon_compute_pll_legacy(struct radeon_pll *pll,
			       uint64_t freq,
			       uint32_t *dot_clock_p,
			       uint32_t *fb_div_p,
			       uint32_t *frac_fb_div_p,
			       uint32_t *ref_div_p,
			       uint32_t *post_div_p)
{
	uint32_t min_ref_div = pll->min_ref_div;
	uint32_t max_ref_div = pll->max_ref_div;
	uint32_t min_post_div = pll->min_post_div;
	uint32_t max_post_div = pll->max_post_div;
	uint32_t min_fractional_feed_div = 0;
	uint32_t max_fractional_feed_div = 0;
	uint32_t best_vco = pll->best_vco;
	uint32_t best_post_div = 1;
	uint32_t best_ref_div = 1;
	uint32_t best_feedback_div = 1;
	uint32_t best_frac_feedback_div = 0;
	uint32_t best_freq = -1;
	uint32_t best_error = 0xffffffff;
	uint32_t best_vco_diff = 1;
	uint32_t post_div;
	u32 pll_out_min, pll_out_max;

	DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
	freq = freq * 1000;

	if (pll->flags & RADEON_PLL_IS_LCD) {
		pll_out_min = pll->lcd_pll_out_min;
		pll_out_max = pll->lcd_pll_out_max;
	} else {
		pll_out_min = pll->pll_out_min;
		pll_out_max = pll->pll_out_max;
	}

	if (pll_out_min > 64800)
		pll_out_min = 64800;

	if (pll->flags & RADEON_PLL_USE_REF_DIV)
		min_ref_div = max_ref_div = pll->reference_div;
	else {
		while (min_ref_div < max_ref_div-1) {
			uint32_t mid = (min_ref_div + max_ref_div) / 2;
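			/* bisect ref_div so the PLL input clock (reference_freq / ref_div)
			 * stays inside [pll_in_min, pll_in_max]
			 */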
			uint32_t pll_in = pll->reference_freq / mid;
			if (pll_in < pll->pll_in_min)
				max_ref_div = mid;
			else if (pll_in > pll->pll_in_max)
				min_ref_div = mid;
			else
				break;
		}
	}

	if (pll->flags & RADEON_PLL_USE_POST_DIV)
		min_post_div = max_post_div = pll->post_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		min_fractional_feed_div = pll->min_frac_feedback_div;
		max_fractional_feed_div = pll->max_frac_feedback_div;
	}

	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
		uint32_t ref_div;

		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
			continue;

		/* legacy radeons only have a few post_divs */
		if (pll->flags & RADEON_PLL_LEGACY) {
			if ((post_div == 5) ||
			    (post_div == 7) ||
			    (post_div == 9) ||
			    (post_div == 10) ||
			    (post_div == 11) ||
			    (post_div == 13) ||
			    (post_div == 14) ||
			    (post_div == 15))
				continue;
		}

		for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
			uint32_t feedback_div, current_freq = 0, error, vco_diff;
			uint32_t pll_in = pll->reference_freq / ref_div;
			uint32_t min_feed_div = pll->min_feedback_div;
			uint32_t max_feed_div = pll->max_feedback_div + 1;

			if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
				continue;

			while (min_feed_div < max_feed_div) {
				uint32_t vco;
				uint32_t min_frac_feed_div = min_fractional_feed_div;
				uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
				uint32_t frac_feedback_div;
				uint64_t tmp;

				feedback_div = (min_feed_div + max_feed_div) / 2;

				tmp = (uint64_t)pll->reference_freq * feedback_div;
				vco = radeon_div(tmp, ref_div);

				if (vco < pll_out_min) {
					min_feed_div = feedback_div + 1;
					continue;
				} else if (vco > pll_out_max) {
					max_feed_div = feedback_div;
					continue;
				}

				while (min_frac_feed_div < max_frac_feed_div) {
					frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
					tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
					tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
					current_freq = radeon_div(tmp, ref_div * post_div);

					if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
						if (freq < current_freq)
							error = 0xffffffff;
						else
							error = freq - current_freq;
					} else
						error = abs(current_freq - freq);
					vco_diff = abs(vco - best_vco);

					if ((best_vco == 0 && error < best_error) ||
					    (best_vco != 0 &&
					     ((best_error > 100 && error < best_error - 100) ||
					      (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
						best_post_div = post_div;
						best_ref_div = ref_div;
						best_feedback_div = feedback_div;
						best_frac_feedback_div = frac_feedback_div;
						best_freq = current_freq;
						best_error = error;
						best_vco_diff = vco_diff;
					} else if (current_freq == freq) {
						if (best_freq == -1) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						}
					}
					if (current_freq < freq)
						min_frac_feed_div = frac_feedback_div + 1;
					else
						max_frac_feed_div = frac_feedback_div;
				}
				if (current_freq < freq)
					min_feed_div = feedback_div + 1;
				else
					max_feed_div = feedback_div;
			}
		}
	}

	*dot_clock_p = best_freq / 10000;
	*fb_div_p = best_feedback_div;
	*frac_fb_div_p = best_frac_feedback_div;
	*ref_div_p = best_ref_div;
	*post_div_p = best_post_div;
	DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      (long long)freq,
		      best_freq / 1000, best_feedback_div, best_frac_feedback_div,
		      best_ref_div, best_post_div);

}

static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);

	if (radeon_fb->obj) {
		drm_gem_object_unreference_unlocked(radeon_fb->obj);
	}
	drm_framebuffer_cleanup(fb);
	kfree(radeon_fb);
}

static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);

	return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
}

static const struct drm_framebuffer_funcs radeon_fb_funcs = {
	.destroy = radeon_user_framebuffer_destroy,
	.create_handle = radeon_user_framebuffer_create_handle,
};

int
radeon_framebuffer_init(struct drm_device *dev,
			struct radeon_framebuffer *rfb,
			struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *obj)
{
	int ret;
	rfb->obj = obj;
	drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
	if (ret) {
		rfb->obj = NULL;
		return ret;
	}
	return 0;
}

static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *file_priv,
			       struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct radeon_framebuffer *radeon_fb;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
			"can't create framebuffer\n", mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
	if (radeon_fb == NULL) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
	if (ret) {
		kfree(radeon_fb);
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return &radeon_fb->base;
}

static void radeon_output_poll_changed(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
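
	/* let the fbdev emulation react to hotplug-detected output changes */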
	radeon_fb_output_poll_changed(rdev);
}

static const struct drm_mode_config_funcs radeon_mode_funcs = {
	.fb_create = radeon_user_framebuffer_create,
	.output_poll_changed = radeon_output_poll_changed
};

static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
{	{ 0, "driver" },
	{ 1, "bios" },
};

static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
{	{ TV_STD_NTSC, "ntsc" },
	{ TV_STD_PAL, "pal" },
	{ TV_STD_PAL_M, "pal-m" },
	{ TV_STD_PAL_60, "pal-60" },
	{ TV_STD_NTSC_J, "ntsc-j" },
	{ TV_STD_SCART_PAL, "scart-pal" },
	{ TV_STD_PAL_CN, "pal-cn" },
	{ TV_STD_SECAM, "secam" },
};

static struct drm_prop_enum_list radeon_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static struct drm_prop_enum_list radeon_audio_enum_list[] =
{	{ RADEON_AUDIO_DISABLE, "off" },
	{ RADEON_AUDIO_ENABLE, "on" },
	{ RADEON_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static struct drm_prop_enum_list radeon_dither_enum_list[] =
{	{ RADEON_FMT_DITHER_DISABLE, "off" },
	{ RADEON_FMT_DITHER_ENABLE, "on" },
};

static int radeon_modeset_create_props(struct radeon_device *rdev)
{
	int sz;

	if (rdev->is_atom_bios) {
		rdev->mode_info.coherent_mode_property =
			drm_property_create_range(rdev->ddev, 0, "coherent", 0, 1);
		if (!rdev->mode_info.coherent_mode_property)
			return -ENOMEM;
	}

	if (!ASIC_IS_AVIVO(rdev)) {
		sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
		rdev->mode_info.tmds_pll_property =
			drm_property_create_enum(rdev->ddev, 0,
						 "tmds_pll",
						 radeon_tmds_pll_enum_list, sz);
	}

	rdev->mode_info.load_detect_property =
		drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
	if (!rdev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(rdev->ddev);

	sz = ARRAY_SIZE(radeon_tv_std_enum_list);
	rdev->mode_info.tv_std_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "tv standard",
					 radeon_tv_std_enum_list, sz);

	sz = ARRAY_SIZE(radeon_underscan_enum_list);
	rdev->mode_info.underscan_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "underscan",
					 radeon_underscan_enum_list, sz);

	rdev->mode_info.underscan_hborder_property =
		drm_property_create_range(rdev->ddev, 0,
					  "underscan hborder", 0, 128);
	if (!rdev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	rdev->mode_info.underscan_vborder_property =
		drm_property_create_range(rdev->ddev, 0,
					  "underscan vborder", 0, 128);
	if (!rdev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(radeon_audio_enum_list);
	rdev->mode_info.audio_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "audio",
					 radeon_audio_enum_list, sz);

	sz = ARRAY_SIZE(radeon_dither_enum_list);
	rdev->mode_info.dither_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "dither",
					 radeon_dither_enum_list, sz);

	return 0;
}

void radeon_update_display_priority(struct radeon_device *rdev)
{
	/* adjustment options for the display watermarks */
	if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
		/* set display
		 * priority to high for r3xx, rv515 chips
		 * this avoids flickering due to underflow to the
		 * display controllers during heavy acceleration.
		 * Don't force high on rs4xx igp chips as it seems to
		 * affect the sound card. See kernel bug 15982.
		 */
		if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
		    !(rdev->flags & RADEON_IS_IGP))
			rdev->disp_priority = 2;
		else
			rdev->disp_priority = 0;
	} else
		rdev->disp_priority = radeon_disp_priority;

}

/*
 * Allocate hdmi structs and determine register offsets
 */
static void radeon_afmt_init(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
		rdev->mode_info.afmt[i] = NULL;

	if (ASIC_IS_NODCE(rdev)) {
		/* nothing to do */
	} else if (ASIC_IS_DCE4(rdev)) {
		static uint32_t eg_offsets[] = {
			EVERGREEN_CRTC0_REGISTER_OFFSET,
			EVERGREEN_CRTC1_REGISTER_OFFSET,
			EVERGREEN_CRTC2_REGISTER_OFFSET,
			EVERGREEN_CRTC3_REGISTER_OFFSET,
			EVERGREEN_CRTC4_REGISTER_OFFSET,
			EVERGREEN_CRTC5_REGISTER_OFFSET,
			0x13830 - 0x7030,
		};
		int num_afmt;

		/* DCE8 has 7 audio blocks tied to DIG encoders */
		/* DCE6 has 6 audio blocks tied to DIG encoders */
		/* DCE4/5 has 6 audio blocks tied to DIG encoders */
		/* DCE4.1 has 2 audio blocks tied to DIG encoders */
		if (ASIC_IS_DCE8(rdev))
			num_afmt = 7;
		else if (ASIC_IS_DCE6(rdev))
			num_afmt = 6;
		else if (ASIC_IS_DCE5(rdev))
			num_afmt = 6;
		else if (ASIC_IS_DCE41(rdev))
			num_afmt = 2;
		else /* DCE4 */
			num_afmt = 6;

		BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
		for (i = 0; i < num_afmt; i++) {
			rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
			if (rdev->mode_info.afmt[i]) {
				rdev->mode_info.afmt[i]->offset = eg_offsets[i];
				rdev->mode_info.afmt[i]->id = i;
			}
		}
	} else if (ASIC_IS_DCE3(rdev)) {
		/* DCE3.x has 2 audio blocks tied to DIG encoders */
		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[0]) {
			rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
			rdev->mode_info.afmt[0]->id = 0;
		}
		rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[1]) {
			rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
			rdev->mode_info.afmt[1]->id = 1;
		}
	} else if (ASIC_IS_DCE2(rdev)) {
		/* DCE2 has at least 1 routable audio block */
		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[0]) {
			rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
			rdev->mode_info.afmt[0]->id = 0;
		}
		/* r6xx has 2 routable audio blocks */
		if (rdev->family >= CHIP_R600) {
			rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
			if (rdev->mode_info.afmt[1]) {
				rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
				rdev->mode_info.afmt[1]->id = 1;
			}
		}
	}
}

static void radeon_afmt_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
		kfree(rdev->mode_info.afmt[i]);
		rdev->mode_info.afmt[i] = NULL;
	}
}

int radeon_modeset_init(struct radeon_device *rdev)
{
	int i;
	int ret;

	drm_mode_config_init(rdev->ddev);
	rdev->mode_info.mode_config_initialized
		= true;

	rdev->ddev->mode_config.funcs = &radeon_mode_funcs;

	if (ASIC_IS_DCE5(rdev)) {
		rdev->ddev->mode_config.max_width = 16384;
		rdev->ddev->mode_config.max_height = 16384;
	} else if (ASIC_IS_AVIVO(rdev)) {
		rdev->ddev->mode_config.max_width = 8192;
		rdev->ddev->mode_config.max_height = 8192;
	} else {
		rdev->ddev->mode_config.max_width = 4096;
		rdev->ddev->mode_config.max_height = 4096;
	}

	rdev->ddev->mode_config.preferred_depth = 24;
	rdev->ddev->mode_config.prefer_shadow = 1;

	rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;

	ret = radeon_modeset_create_props(rdev);
	if (ret) {
		return ret;
	}

	/* init i2c buses */
	radeon_i2c_init(rdev);

	/* check combios for a valid hardcoded EDID - Sun servers */
	if (!rdev->is_atom_bios) {
		/* check for hardcoded EDID in BIOS */
		radeon_combios_check_hardcoded_edid(rdev);
	}

	/* allocate crtcs */
	for (i = 0; i < rdev->num_crtc; i++) {
		radeon_crtc_init(rdev->ddev, i);
	}

	/* okay we should have all the bios connectors */
	ret = radeon_setup_enc_conn(rdev->ddev);
	if (!ret) {
		return ret;
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
	}

	/* initialize hpd */
	radeon_hpd_init(rdev);

	/* setup afmt */
	radeon_afmt_init(rdev);

	radeon_fbdev_init(rdev);
	drm_kms_helper_poll_init(rdev->ddev);

	if (rdev->pm.dpm_enabled) {
		/* do dpm late init */
		ret = radeon_pm_late_init(rdev);
		if (ret) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
		/* set the dpm state for PX since there won't be
		 * a modeset to call this.
		 */
		radeon_pm_compute_clocks(rdev);
	}

	return 0;
}

void radeon_modeset_fini(struct radeon_device *rdev)
{
	radeon_fbdev_fini(rdev);
	kfree(rdev->mode_info.bios_hardcoded_edid);

	if (rdev->mode_info.mode_config_initialized) {
		radeon_afmt_fini(rdev);
		drm_kms_helper_poll_fini(rdev->ddev);
		radeon_hpd_fini(rdev);
		drm_mode_config_cleanup(rdev->ddev);
		rdev->mode_info.mode_config_initialized = false;
	}
	/* free i2c buses */
	radeon_i2c_fini(rdev);
}

static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}

bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *encoder;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_encoder *radeon_encoder;
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	bool first = true;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	radeon_crtc->h_border = 0;
	radeon_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		radeon_encoder = to_radeon_encoder(encoder);
		connector = radeon_get_connector_for_encoder(encoder);
		radeon_connector = to_radeon_connector(connector);

		if (first) {
			/* set scaling */
			if (radeon_encoder->rmx_type == RMX_OFF)
				radeon_crtc->rmx_type = RMX_OFF;
			else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
				 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
			else
				radeon_crtc->rmx_type = RMX_OFF;
			/* copy native mode */
			memcpy(&radeon_crtc->native_mode,
			       &radeon_encoder->native_mode,
			       sizeof(struct drm_display_mode));
			src_v = crtc->mode.vdisplay;
			dst_v = radeon_crtc->native_mode.vdisplay;
			src_h = crtc->mode.hdisplay;
			dst_h = radeon_crtc->native_mode.hdisplay;

			/* fix up for overscan on hdmi */
			if (ASIC_IS_AVIVO(rdev) &&
			    (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
			    ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
			     ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
			      drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
			      is_hdtv_mode(mode)))) {
				if (radeon_encoder->underscan_hborder != 0)
					radeon_crtc->h_border = radeon_encoder->underscan_hborder;
				else
					radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
				if (radeon_encoder->underscan_vborder != 0)
					radeon_crtc->v_border = radeon_encoder->underscan_vborder;
				else
					radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
				radeon_crtc->rmx_type = RMX_FULL;
				src_v = crtc->mode.vdisplay;
				dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
				src_h = crtc->mode.hdisplay;
				dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
			}
			first = false;
		} else {
			if (radeon_crtc->rmx_type !=
radeon_encoder->rmx_type) { 1725 /* WARNING: Right now this can't happen but 1726 * in the future we need to check that scaling 1727 * are consistent across different encoder 1728 * (ie all encoder can work with the same 1729 * scaling). 1730 */ 1731 DRM_ERROR("Scaling not consistent across encoder.\n"); 1732 return false; 1733 } 1734 } 1735 } 1736 if (radeon_crtc->rmx_type != RMX_OFF) { 1737 fixed20_12 a, b; 1738 a.full = dfixed_const(src_v); 1739 b.full = dfixed_const(dst_v); 1740 radeon_crtc->vsc.full = dfixed_div(a, b); 1741 a.full = dfixed_const(src_h); 1742 b.full = dfixed_const(dst_h); 1743 radeon_crtc->hsc.full = dfixed_div(a, b); 1744 } else { 1745 radeon_crtc->vsc.full = dfixed_const(1); 1746 radeon_crtc->hsc.full = dfixed_const(1); 1747 } 1748 return true; 1749 } 1750 1751 /* 1752 * Retrieve current video scanout position of crtc on a given gpu, and 1753 * an optional accurate timestamp of when query happened. 1754 * 1755 * \param dev Device to query. 1756 * \param crtc Crtc to query. 1757 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). 1758 * \param *vpos Location where vertical scanout position should be stored. 1759 * \param *hpos Location where horizontal scanout position should go. 1760 * \param *stime Target location for timestamp taken immediately before 1761 * scanout position query. Can be NULL to skip timestamp. 1762 * \param *etime Target location for timestamp taken immediately after 1763 * scanout position query. Can be NULL to skip timestamp. 1764 * 1765 * Returns vpos as a positive number while in active scanout area. 1766 * Returns vpos as a negative number inside vblank, counting the number 1767 * of scanlines to go until end of vblank, e.g., -1 means "one scanline 1768 * until start of active scanout / end of vblank." 1769 * 1770 * \return Flags, or'ed together as follows: 1771 * 1772 * DRM_SCANOUTPOS_VALID = Query successful. 1773 * DRM_SCANOUTPOS_INVBL = Inside vblank. 1774 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of 1775 * this flag means that returned position may be offset by a constant but 1776 * unknown small number of scanlines wrt. real scanout position. 1777 * 1778 */ 1779 int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags, 1780 int *vpos, int *hpos, ktime_t *stime, ktime_t *etime) 1781 { 1782 u32 stat_crtc = 0, vbl = 0, position = 0; 1783 int vbl_start, vbl_end, vtotal, ret = 0; 1784 bool in_vbl = true; 1785 1786 struct radeon_device *rdev = dev->dev_private; 1787 1788 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 1789 1790 /* Get optional system timestamp before query. 

/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when the query happened.
 *
 * \param dev Device to query.
 * \param crtc Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should be stored.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_INVBL = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
 *
 * See the illustrative usage sketch after this function.
 */
int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
			       int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
{
	u32 stat_crtc = 0, vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct radeon_device *rdev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (ASIC_IS_DCE4(rdev)) {
		if (crtc == 0) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC0_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC0_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 1) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC1_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC1_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 2) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC2_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC2_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 3) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC3_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC3_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 4) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC4_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC4_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 5) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC5_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC5_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else if (ASIC_IS_AVIVO(rdev)) {
		if (crtc == 0) {
			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 1) {
			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else {
		/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
		if (crtc == 0) {
			/* Assume vbl_end == 0, get vbl_start from
			 * upper 16 bits.
			 */
			vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			/* Only retrieve vpos from upper 16 bits, set hpos == 0. */
			position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 1) {
			vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
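
	/* As decoded below, bits [12:0] of 'position' hold the vertical
	 * scanout position and bits [28:16] the horizontal one; e.g. a raw
	 * value of 0x00500123 yields vpos = 0x123 = 291 and hpos = 0x50 = 80.
	 */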

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
		vbl_end = 0;
	}

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >= 0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	/* Is vpos outside nominal vblank area, but less than
	 * 1/100 of a frame height away from start of vblank?
	 * If so, assume this isn't a massively delayed vblank
	 * interrupt, but a vblank interrupt that fired a few
	 * microseconds before true start of vblank. Compensate
	 * by adding a full frame duration to the final timestamp.
	 * Happens, e.g., on ATI R500, R600.
	 *
	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
	 */
	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
		vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
		vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;

		if (vbl_start - *vpos < vtotal / 100) {
			*vpos -= vtotal;

			/* Signal this correction as "applied". */
			ret |= 0x8;
		}
	}

	return ret;
}
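
/*
 * Illustrative usage sketch (hypothetical caller, not taken from this driver):
 * a vblank timestamping path would typically interpret the result as follows.
 *
 *	int vpos, hpos, flags;
 *	ktime_t stime, etime;
 *
 *	flags = radeon_get_crtc_scanoutpos(dev, crtc_id, 0,
 *					   &vpos, &hpos, &stime, &etime);
 *	if (!(flags & DRM_SCANOUTPOS_VALID)) {
 *		// query failed for this crtc
 *	} else if (flags & DRM_SCANOUTPOS_INVBL) {
 *		// inside vblank: vpos is negative, counting scanlines
 *		// until active scanout starts again
 *	} else {
 *		// active scanout: vpos/hpos give the current beam position
 *	}
 *
 * The (etime - stime) interval brackets the register reads above, so a caller
 * can use it to bound the uncertainty of the sampled position.
 */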