// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2011 Intel Corporation
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
 */

#include <linux/delay.h>
#include <linux/highmem.h>

#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#include "framebuffer.h"
#include "gem.h"
#include "gma_display.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"

/*
 * Returns whether any output on the specified pipe is of the specified type
 */
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *l_entry;

	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
			struct gma_encoder *gma_encoder =
						gma_attached_encoder(l_entry);
			if (gma_encoder->type == type)
				return true;
		}
	}

	return false;
}

void gma_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50 Hz. */
	mdelay(20);
}

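/*
 * Program the primary plane of the given pipe for scanout: pin the
 * framebuffer into the GTT, then write the stride, the pixel-format bits of
 * the plane control register, and the base/offset registers so the new
 * scanout address takes effect.
 */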
int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct psb_gem_object *pobj;
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;
	u32 dspcntr;
	int ret = 0;

	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound */
	if (!fb) {
		dev_err(dev->dev, "No FB bound\n");
		goto gma_pipe_cleaner;
	}

	pobj = to_psb_gem_object(fb->obj[0]);

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gem_pin(pobj);
	if (ret < 0)
		goto gma_pipe_set_base_exit;
	start = pobj->offset;
	offset = y * fb->pitches[0] + x * fb->format->cpp[0];

	REG_WRITE(map->stride, fb->pitches[0]);

	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (fb->format->cpp[0] * 8) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->format->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		goto gma_pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/* FIXME: Investigate whether this really is the base for psb and why
	   the linear offset is named base for the other chips. map->surf
	   should be the base and map->linoff the offset for all chips */
	if (IS_PSB(dev)) {
		REG_WRITE(map->base, offset + start);
		REG_READ(map->base);
	} else {
		REG_WRITE(map->base, offset);
		REG_READ(map->base);
		REG_WRITE(map->surf, start);
		REG_READ(map->surf);
	}

gma_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gem_unpin(to_psb_gem_object(old_fb->obj[0]));

gma_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}

/* Loads the palette/gamma unit for the CRTC with the prepared values */
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	int palreg = map->palette;
	u16 *r, *g, *b;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;

	if (gma_power_begin(dev, false)) {
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				  (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				  ((*b++ >> 8) + gma_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		for (i = 0; i < 256; i++) {
			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
			dev_priv->regs.pipe[0].palette[i] =
				(((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				(((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				((*b++ >> 8) + gma_crtc->lut_adj[i]);
		}
	}
}

int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
		       u32 size,
		       struct drm_modeset_acquire_ctx *ctx)
{
	gma_crtc_load_lut(crtc);

	return 0;
}

/*
 * Sets the power management mode of the pipe and plane.
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 */
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */

	if (IS_CDV(dev))
		dev_priv->ops->disable_sr(dev);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		if (gma_crtc->active)
			break;

		gma_crtc->active = true;

		/* Enable the DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			REG_WRITE(map->dpll, temp);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Enable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(map->cntr,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

		temp = REG_READ(map->status);
		temp &= ~(0xFFFF);
		temp |= PIPE_FIFO_UNDERRUN;
		REG_WRITE(map->status, temp);
		REG_READ(map->status);

		gma_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */

		drm_crtc_vblank_on(crtc);
		break;
	case DRM_MODE_DPMS_OFF:
		if (!gma_crtc->active)
			break;

		gma_crtc->active = false;

		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Turn off vblank interrupts */
		drm_crtc_vblank_off(crtc);

		/* Wait for vblank for the disable to take effect */
		gma_wait_for_vblank(dev);

		/* Disable plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(map->cntr,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
			REG_READ(map->base);
		}

		/* Disable pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
			REG_READ(map->conf);
		}

		/* Wait for vblank for the disable to take effect. */
		gma_wait_for_vblank(dev);

		udelay(150);

		/* Disable DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}

	if (IS_CDV(dev))
		dev_priv->ops->update_wm(dev, crtc);

	/* Set FIFO watermarks */
	REG_WRITE(DSPARB, 0x3F3E);
}

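/*
 * Set the hardware cursor image for the CRTC. A zero handle disables the
 * cursor; otherwise only 64x64 ARGB cursors are accepted. The backing GEM
 * object is pinned into the GTT, and on chips that need a physical cursor
 * address its contents are copied into the pre-allocated cursor memory.
 */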
int gma_crtc_cursor_set(struct drm_crtc *crtc,
			struct drm_file *file_priv,
			uint32_t handle,
			uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
	uint32_t temp;
	size_t addr = 0;
	struct psb_gem_object *pobj;
	struct psb_gem_object *cursor_pobj = gma_crtc->cursor_pobj;
	struct drm_gem_object *obj;
	void *tmp_dst;
	int ret = 0, i, cursor_pages;

	/* If we didn't get a handle then turn the cursor off */
	if (!handle) {
		temp = CURSOR_MODE_DISABLE;
		if (gma_power_begin(dev, false)) {
			REG_WRITE(control, temp);
			REG_WRITE(base, 0);
			gma_power_end(dev);
		}

		/* Unpin the old GEM object */
		if (gma_crtc->cursor_obj) {
			pobj = to_psb_gem_object(gma_crtc->cursor_obj);
			psb_gem_unpin(pobj);
			drm_gem_object_put(gma_crtc->cursor_obj);
			gma_crtc->cursor_obj = NULL;
		}
		return 0;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->size < width * height * 4) {
		dev_dbg(dev->dev, "Buffer is too small\n");
		ret = -ENOMEM;
		goto unref_cursor;
	}

	pobj = to_psb_gem_object(obj);

	/* Pin the memory into the GTT */
	ret = psb_gem_pin(pobj);
	if (ret) {
		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
		goto unref_cursor;
	}

	if (dev_priv->ops->cursor_needs_phys) {
		if (!cursor_pobj) {
			dev_err(dev->dev, "No hardware cursor mem available");
			ret = -ENOMEM;
			goto unref_cursor;
		}

		/* Prevent overflow */
		if (pobj->npage > 4)
			cursor_pages = 4;
		else
			cursor_pages = pobj->npage;

		/* Copy the cursor to cursor mem */
		tmp_dst = dev_priv->vram_addr + cursor_pobj->offset;
		for (i = 0; i < cursor_pages; i++) {
			memcpy_from_page(tmp_dst, pobj->pages[i], 0, PAGE_SIZE);
			tmp_dst += PAGE_SIZE;
		}

		addr = gma_crtc->cursor_addr;
	} else {
		addr = pobj->offset;
		gma_crtc->cursor_addr = addr;
	}

	temp = 0;
	/* set the pipe for the cursor */
	temp |= (pipe << 28);
	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;

	if (gma_power_begin(dev, false)) {
		REG_WRITE(control, temp);
		REG_WRITE(base, addr);
		gma_power_end(dev);
	}

	/* unpin the old bo */
	if (gma_crtc->cursor_obj) {
		pobj = to_psb_gem_object(gma_crtc->cursor_obj);
		psb_gem_unpin(pobj);
		drm_gem_object_put(gma_crtc->cursor_obj);
	}

	gma_crtc->cursor_obj = obj;
unlock:
	return ret;

unref_cursor:
	drm_gem_object_put(obj);
	return ret;
}

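/*
 * Move the hardware cursor. The position registers use a sign-magnitude
 * encoding, so negative coordinates set the per-axis sign bit and store the
 * absolute value.
 */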
int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t temp = 0;
	uint32_t addr;

	if (x < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
		x = -x;
	}
	if (y < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
		y = -y;
	}

	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);

	addr = gma_crtc->cursor_addr;

	if (gma_power_begin(dev, false)) {
		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
		gma_power_end(dev);
	}
	return 0;
}

void gma_crtc_prepare(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

void gma_crtc_commit(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}

void gma_crtc_disable(struct drm_crtc *crtc)
{
	struct psb_gem_object *pobj;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->primary->fb) {
		pobj = to_psb_gem_object(crtc->primary->fb->obj[0]);
		psb_gem_unpin(pobj);
	}
}

void gma_crtc_destroy(struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	if (gma_crtc->cursor_pobj)
		drm_gem_object_put(&gma_crtc->cursor_pobj->base);

	kfree(gma_crtc->crtc_state);
	drm_crtc_cleanup(crtc);
	kfree(gma_crtc);
}

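/*
 * Legacy page flip: the new framebuffer is made current and scanned out via
 * the mode_set_base helper. When an event is requested it is queued under
 * dev->event_lock with a vblank reference held and is completed later from
 * the vblank interrupt path.
 */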
int gma_crtc_page_flip(struct drm_crtc *crtc,
		       struct drm_framebuffer *fb,
		       struct drm_pending_vblank_event *event,
		       uint32_t page_flip_flags,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *current_fb = crtc->primary->fb;
	struct drm_framebuffer *old_fb = crtc->primary->old_fb;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;
	int ret;

	if (!crtc_funcs->mode_set_base)
		return -EINVAL;

	/* Using mode_set_base requires the new fb to be set already. */
	crtc->primary->fb = fb;

	if (event) {
		spin_lock_irqsave(&dev->event_lock, flags);

		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		gma_crtc->page_flip_event = event;

		/* Call this locked if we want an event at vblank interrupt. */
		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
		if (ret) {
			gma_crtc->page_flip_event = NULL;
			drm_crtc_vblank_put(crtc);
		}

		spin_unlock_irqrestore(&dev->event_lock, flags);
	} else {
		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
	}

	/* Restore previous fb in case of failure. */
	if (ret)
		crtc->primary->fb = current_fb;

	return ret;
}

int gma_crtc_set_config(struct drm_mode_set *set,
			struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = set->crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	int ret;

	if (!dev_priv->rpm_enabled)
		return drm_crtc_helper_set_config(set, ctx);

	pm_runtime_forbid(dev->dev);
	ret = drm_crtc_helper_set_config(set, ctx);
	pm_runtime_allow(dev->dev);

	return ret;
}

/*
 * Save HW states of given crtc
 */
void gma_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No CRTC state found\n");
		return;
	}

	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
	crtc_state->savePIPECONF = REG_READ(map->conf);
	crtc_state->savePIPESRC = REG_READ(map->src);
	crtc_state->saveFP0 = REG_READ(map->fp0);
	crtc_state->saveFP1 = REG_READ(map->fp1);
	crtc_state->saveDPLL = REG_READ(map->dpll);
	crtc_state->saveHTOTAL = REG_READ(map->htotal);
	crtc_state->saveHBLANK = REG_READ(map->hblank);
	crtc_state->saveHSYNC = REG_READ(map->hsync);
	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
	crtc_state->saveVBLANK = REG_READ(map->vblank);
	crtc_state->saveVSYNC = REG_READ(map->vsync);
	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

	/* NOTE: DSPSIZE DSPPOS only for psb */
	crtc_state->saveDSPSIZE = REG_READ(map->size);
	crtc_state->saveDSPPOS = REG_READ(map->pos);

	crtc_state->saveDSPBASE = REG_READ(map->base);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}

/*
 * Restore HW states of given crtc
 */
void gma_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No crtc state\n");
		return;
	}

	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(map->dpll,
			  crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);
		udelay(150);
	}

	REG_WRITE(map->fp0, crtc_state->saveFP0);
	REG_READ(map->fp0);

	REG_WRITE(map->fp1, crtc_state->saveFP1);
	REG_READ(map->fp1);

	REG_WRITE(map->dpll, crtc_state->saveDPLL);
	REG_READ(map->dpll);
	udelay(150);

	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);

	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
	REG_WRITE(map->pos, crtc_state->saveDSPPOS);

	REG_WRITE(map->src, crtc_state->savePIPESRC);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);
	REG_WRITE(map->conf, crtc_state->savePIPECONF);

	gma_wait_for_vblank(dev);

	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);

	gma_wait_for_vblank(dev);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}

void gma_encoder_prepare(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;
	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void gma_encoder_commit(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;
	/* lvds has its own version of commit see psb_intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}

void gma_encoder_destroy(struct drm_encoder *encoder)
{
	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Currently there is only a 1:1 mapping of encoders and connectors */
struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

	return &gma_encoder->base;
}

void gma_connector_attach_encoder(struct gma_connector *connector,
				  struct gma_encoder *encoder)
{
	connector->encoder = encoder;
	drm_connector_attach_encoder(&connector->base,
				     &encoder->base);
}

#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }

bool gma_pll_is_valid(struct drm_crtc *crtc,
		      const struct gma_limit_t *limit,
		      struct gma_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		GMA_PLL_INVALID("p1 out of range");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		GMA_PLL_INVALID("p out of range");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		GMA_PLL_INVALID("m2 out of range");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		GMA_PLL_INVALID("m1 out of range");
	/* On CDV m1 is always 0 */
	if (clock->m1 <= clock->m2 && clock->m1 != 0)
		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		GMA_PLL_INVALID("m out of range");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		GMA_PLL_INVALID("n out of range");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		GMA_PLL_INVALID("vco out of range");
	/* XXX: We may need to be checking "Dot clock"
	 * depending on the multiplier, connector, etc.,
	 * rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		GMA_PLL_INVALID("dot out of range");

	return true;
}

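/*
 * Brute-force search for PLL dividers: p2 is picked from the current LVDS
 * state (or from the dot-clock limit), then every m1/m2/n/p1 combination
 * within the limits is run through the chip-specific clock_funcs->clock()
 * callback and validated with pll_is_valid(). The combination whose computed
 * dot clock is closest to the requested target wins.
 */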
bool gma_find_best_pll(const struct gma_limit_t *limit,
		       struct drm_crtc *crtc, int target, int refclk,
		       struct gma_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	const struct gma_clock_funcs *clock_funcs =
		to_gma_crtc(crtc)->clock_funcs;
	struct gma_clock_t clock;
	int err = target;

	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel. We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* m1 is always 0 on CDV so the outermost loop will run just once */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
		      clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					clock_funcs->clock(refclk, &clock);

					if (!clock_funcs->pll_is_valid(crtc,
								       limit,
								       &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return err != target;
}