/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dma-resv.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_tv.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
#include "intel_vga.h"
#include "intel_vrr.h"
#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"
"intel_plane_initial.h" 106 #include "intel_pmdemand.h" 107 #include "intel_pps.h" 108 #include "intel_psr.h" 109 #include "intel_psr_regs.h" 110 #include "intel_sdvo.h" 111 #include "intel_snps_phy.h" 112 #include "intel_tc.h" 113 #include "intel_tv.h" 114 #include "intel_vblank.h" 115 #include "intel_vdsc.h" 116 #include "intel_vdsc_regs.h" 117 #include "intel_vga.h" 118 #include "intel_vrr.h" 119 #include "intel_wm.h" 120 #include "skl_scaler.h" 121 #include "skl_universal_plane.h" 122 #include "skl_watermark.h" 123 #include "vlv_dsi.h" 124 #include "vlv_dsi_pll.h" 125 #include "vlv_dsi_regs.h" 126 #include "vlv_sideband.h" 127 128 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); 129 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); 130 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); 131 static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state); 132 133 /* returns HPLL frequency in kHz */ 134 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) 135 { 136 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 137 138 /* Obtain SKU information */ 139 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 140 CCK_FUSE_HPLL_FREQ_MASK; 141 142 return vco_freq[hpll_freq] * 1000; 143 } 144 145 int vlv_get_cck_clock(struct drm_i915_private *dev_priv, 146 const char *name, u32 reg, int ref_freq) 147 { 148 u32 val; 149 int divider; 150 151 val = vlv_cck_read(dev_priv, reg); 152 divider = val & CCK_FREQUENCY_VALUES; 153 154 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) != 155 (divider << CCK_FREQUENCY_STATUS_SHIFT), 156 "%s change in progress\n", name); 157 158 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); 159 } 160 161 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 162 const char *name, u32 reg) 163 { 164 int hpll; 165 166 vlv_cck_get(dev_priv); 167 168 if (dev_priv->hpll_freq == 0) 169 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv); 170 171 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq); 172 173 vlv_cck_put(dev_priv); 174 175 return hpll; 176 } 177 178 void intel_update_czclk(struct drm_i915_private *dev_priv) 179 { 180 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) 181 return; 182 183 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", 184 CCK_CZ_CLOCK_CONTROL); 185 186 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n", 187 dev_priv->czclk_freq); 188 } 189 190 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state) 191 { 192 return (crtc_state->active_planes & 193 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0; 194 } 195 196 /* WA Display #0827: Gen9:all */ 197 static void 198 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) 199 { 200 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 201 DUPS1_GATING_DIS | DUPS2_GATING_DIS, 202 enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0); 203 } 204 205 /* Wa_2006604312:icl,ehl */ 206 static void 207 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 208 bool enable) 209 { 210 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 211 DPFR_GATING_DIS, 212 enable ? DPFR_GATING_DIS : 0); 213 } 214 215 /* Wa_1604331009:icl,jsl,ehl */ 216 static void 217 icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 218 bool enable) 219 { 220 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 221 CURSOR_GATING_DIS, 222 enable ? 

static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DUPS1_GATING_DIS | DUPS2_GATING_DIS,
		     enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DPFR_GATING_DIS,
		     enable ? DPFR_GATING_DIS : 0);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->bigjoiner_pipes) - 1;
}

u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_pipes)
		return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe != bigjoiner_master_pipe(crtc_state);
}

bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe == bigjoiner_master_pipe(crtc_state);
}

static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->bigjoiner_pipes);
}

struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}
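
/*
 * Illustrative example of the bigjoiner mask math above: with
 * bigjoiner_pipes = BIT(PIPE_A) | BIT(PIPE_B), bigjoiner_master_pipe()
 * picks the lowest set bit (PIPE_A), intel_crtc_bigjoiner_slave_pipes()
 * returns BIT(PIPE_B), and intel_bigjoiner_num_pipes() returns 2.
 */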

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder),
					    TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(dev_priv, cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder), str_on_off(state),
			str_on_off(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(i915, cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
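
/*
 * Note on the PORT_C case in vlv_wait_port_ready(): the port B and port C
 * ready bits share the same DPLL(0) register, with the port C bits sitting
 * 4 bits above the port B bits, hence expected_mask <<= 4 rather than a
 * different register read.
 */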

void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
		       val | TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~TRANSCONF_ENABLE;

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int plane_size;

		if (rem_info->plane[i].linear)
			plane_size = rem_info->plane[i].size;
		else
			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;

		if (plane_size == 0)
			continue;

		if (rem_info->plane_alignment)
			size = ALIGN(size, rem_info->plane_alignment);

		size += plane_size;
	}

	return size;
}

bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc &&
		 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;

	return y * pitch + x * cpp;
}
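
/*
 * Worked example (illustrative values): for an XRGB8888 color plane
 * (cpp = 4) with a mapping_stride of 16384 bytes, intel_fb_xy_to_linear()
 * maps (x, y) = (8, 2) to 2 * 16384 + 8 * 4 = 32800 bytes from the start
 * of the GTT mapping.
 */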

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * if pipe A is disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}

unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}

static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2(dev_priv))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *master_crtc;
	int num_encoders = 0;
	int i;

	master_crtc = intel_master_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &master_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(state->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(master_crtc->pipe));

	return encoder;
}
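
/*
 * Usage note (illustrative): intel_get_crtc_new_encoder() leans on the
 * single-encoder assumption stated above; with more than one matching
 * connector the drm_WARN() fires and the last encoder wins, and with none
 * it returns NULL, so callers must only use it where exactly one encoder
 * feeds the CRTC.
 */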

static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
			  PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
			  PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(dev_priv) == 9)
		return true;

	return false;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N strech max must be programmed to 11b (x1)
		 *  when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}

static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}

static void intel_encoders_audio_enable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_enable)
			encoder->audio_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_audio_disable(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_disable)
			encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
	}
}

#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))

static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline ||
		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
}

static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
			 const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
			  const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
			   const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

#undef is_disabling
#undef is_enabling
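
/*
 * Example of the (now #undef'd) is_enabling()/is_disabling() semantics:
 * a feature counts as "enabling" on a 0 -> 1 transition across the commit,
 * or whenever a full modeset leaves it enabled in the new state, since a
 * modeset implies the feature was torn down and has to be brought back up.
 */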

static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_post_plane_update(state, crtc);

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);

	if (audio_enabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_enable(state, crtc);
}

static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}

static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
				       ~new_crtc_state->async_flip_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    disable_async_flip_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}

static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (vrr_disabling(old_crtc_state, new_crtc_state)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	if (audio_disabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_disable(state, crtc);

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}

static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
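
/*
 * The workaround toggles in intel_pre_plane_update() pair with
 * intel_post_plane_update(): the pre hook arms a workaround before the
 * plane update once the new state first needs it, and the post hook
 * disarms it only when the old state needed it and the new state no
 * longer does.
 */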
	if (i915->display.dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}
}

static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}
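
/*
 * Taken together, the intel_encoders_*() helpers above run the optional
 * encoder hooks in a fixed order around a modeset: pre_pll_enable ->
 * pre_enable -> enable on the way up, and disable -> post_disable ->
 * post_pll_disable on the way down, each filtered to connectors whose
 * state points at this CRTC.
 */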

static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
	}
}

static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);

	plane->disable_arm(plane, crtc_state);
}

static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}

static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	ilk_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		ilk_pch_pre_enable(state, crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		intel_wait_for_pipe_scanline_moving(crtc);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
					    enum pipe pipe, bool apply)
{
	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;

	if (apply)
		val |= mask;
	else
		val &= ~mask;

	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
}

static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
		       HSW_LINETIME(crtc_state->linetime) |
		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
}

static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder),
		     HSW_FRAME_START_DELAY_MASK,
		     HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
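
/*
 * Note on hsw_set_frame_start_delay(): framestart_delay is stored 1-based
 * in the crtc state while the HSW_FRAME_START_DELAY() register field is
 * 0-based, hence the "- 1" when programming it.
 */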

static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);

	/*
	 * Enable sequence steps 1-7 on bigjoiner master
	 */
	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_pll_enable(state, master_crtc);

	if (crtc_state->shared_dpll)
		intel_enable_shared_dpll(crtc_state);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_enable(state, master_crtc);
}

static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);
	if (HAS_VRR(dev_priv))
		intel_vrr_set_transcoder_timings(crtc_state);

	if (cpu_transcoder != TRANSCODER_EDP)
		intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder),
			       crtc_state->pixel_multiplier - 1);

	hsw_set_frame_start_delay(crtc_state);

	hsw_set_transconf(crtc_state);
}

static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	intel_dmc_enable_pipe(dev_priv, crtc->pipe);

	if (!new_crtc_state->bigjoiner_pipes) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_dsc_enable(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 13)
		intel_uncompressed_joiner_enable(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipe_misc(new_crtc_state);

	if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
	    !transcoder_is_dsi(cpu_transcoder))
		hsw_configure_cpu_transcoder(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);

	intel_initial_watermarks(state, crtc);

	if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (psl_clkgate_wa) {
		intel_crtc_wait_for_next_vblank(crtc);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		struct intel_crtc *wa_crtc;

		wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);

		intel_crtc_wait_for_next_vblank(wa_crtc);
		intel_crtc_wait_for_next_vblank(wa_crtc);
	}
}

void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
}

static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_disable(state, crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_post_disable(state, crtc);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_disable_shared_dpll(old_crtc_state);
}

static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
 */
	if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
		intel_encoders_disable(state, crtc);
		intel_encoders_post_disable(state, crtc);
	}

	intel_disable_shared_dpll(old_crtc_state);

	if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
		struct intel_crtc *slave_crtc;

		intel_encoders_post_pll_disable(state, crtc);

		intel_dmc_disable_pipe(i915, crtc->pipe);

		for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
						 intel_crtc_bigjoiner_slave_pipes(old_crtc_state))
			intel_dmc_disable_pipe(i915, slave_crtc->pipe);
	}
}

static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}

bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (phy == PHY_NONE)
		return false;
	else if (IS_ALDERLAKE_S(dev_priv))
		return phy <= PHY_E;
	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
		return phy <= PHY_D;
	else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
		return phy <= PHY_C;
	else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
		return phy <= PHY_B;
	else
		/*
		 * DG2 outputs labelled as "combo PHY" in the bspec use
		 * SNPS PHYs with completely different programming,
		 * hence we always return false here.
		 */
		return false;
}

bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
	/*
	 * DG2's "TC1", although a TC-capable output, doesn't share the
	 * same flow as other platforms on the display engine side and
	 * instead relies on the SNPS PHY, which is programmed separately.
	 */
	if (IS_DG2(dev_priv))
		return false;

	if (DISPLAY_VER(dev_priv) >= 13)
		return phy >= PHY_F && phy <= PHY_I;
	else if (IS_TIGERLAKE(dev_priv))
		return phy >= PHY_D && phy <= PHY_I;
	else if (IS_ICELAKE(dev_priv))
		return phy >= PHY_C && phy <= PHY_F;

	return false;
}

bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
{
	/*
	 * For DG2, and for DG2 only, all four "combo" ports and the TC1 port
	 * (PHY E) use Synopsys PHYs. See intel_phy_is_tc().
1885 */ 1886 return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E; 1887 } 1888 1889 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) 1890 { 1891 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD) 1892 return PHY_D + port - PORT_D_XELPD; 1893 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1) 1894 return PHY_F + port - PORT_TC1; 1895 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1) 1896 return PHY_B + port - PORT_TC1; 1897 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1) 1898 return PHY_C + port - PORT_TC1; 1899 else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && 1900 port == PORT_D) 1901 return PHY_A; 1902 1903 return PHY_A + port - PORT_A; 1904 } 1905 1906 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) 1907 { 1908 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) 1909 return TC_PORT_NONE; 1910 1911 if (DISPLAY_VER(dev_priv) >= 12) 1912 return TC_PORT_1 + port - PORT_TC1; 1913 else 1914 return TC_PORT_1 + port - PORT_C; 1915 } 1916 1917 enum intel_display_power_domain 1918 intel_aux_power_domain(struct intel_digital_port *dig_port) 1919 { 1920 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 1921 1922 if (intel_tc_port_in_tbt_alt_mode(dig_port)) 1923 return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch); 1924 1925 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); 1926 } 1927 1928 static void get_crtc_power_domains(struct intel_crtc_state *crtc_state, 1929 struct intel_power_domain_mask *mask) 1930 { 1931 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1932 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1933 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 1934 struct drm_encoder *encoder; 1935 enum pipe pipe = crtc->pipe; 1936 1937 bitmap_zero(mask->bits, POWER_DOMAIN_NUM); 1938 1939 if (!crtc_state->hw.active) 1940 return; 1941 1942 set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits); 1943 set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits); 1944 if (crtc_state->pch_pfit.enabled || 1945 crtc_state->pch_pfit.force_thru) 1946 set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits); 1947 1948 drm_for_each_encoder_mask(encoder, &dev_priv->drm, 1949 crtc_state->uapi.encoder_mask) { 1950 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 1951 1952 set_bit(intel_encoder->power_domain, mask->bits); 1953 } 1954 1955 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 1956 set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits); 1957 1958 if (crtc_state->shared_dpll) 1959 set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits); 1960 1961 if (crtc_state->dsc.compression_enable) 1962 set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits); 1963 } 1964 1965 void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state, 1966 struct intel_power_domain_mask *old_domains) 1967 { 1968 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1969 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1970 enum intel_display_power_domain domain; 1971 struct intel_power_domain_mask domains, new_domains; 1972 1973 get_crtc_power_domains(crtc_state, &domains); 1974 1975 bitmap_andnot(new_domains.bits, 1976 domains.bits, 1977 crtc->enabled_power_domains.mask.bits, 1978 POWER_DOMAIN_NUM); 1979 bitmap_andnot(old_domains->bits, 1980 crtc->enabled_power_domains.mask.bits, 1981 domains.bits, 1982 POWER_DOMAIN_NUM); 1983 1984 for_each_power_domain(domain, &new_domains) 1985 
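		/*
		 * Grab a reference for each domain this crtc newly needs;
		 * the stale references left in old_domains are meant to be
		 * dropped by the caller afterwards, see
		 * intel_modeset_put_crtc_power_domains() below.
		 */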
intel_display_power_get_in_set(dev_priv, 1986 &crtc->enabled_power_domains, 1987 domain); 1988 } 1989 1990 void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc, 1991 struct intel_power_domain_mask *domains) 1992 { 1993 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev), 1994 &crtc->enabled_power_domains, 1995 domains); 1996 } 1997 1998 static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 1999 { 2000 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2001 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2002 2003 if (intel_crtc_has_dp_encoder(crtc_state)) { 2004 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 2005 &crtc_state->dp_m_n); 2006 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 2007 &crtc_state->dp_m2_n2); 2008 } 2009 2010 intel_set_transcoder_timings(crtc_state); 2011 2012 i9xx_set_pipeconf(crtc_state); 2013 } 2014 2015 static void valleyview_crtc_enable(struct intel_atomic_state *state, 2016 struct intel_crtc *crtc) 2017 { 2018 const struct intel_crtc_state *new_crtc_state = 2019 intel_atomic_get_new_crtc_state(state, crtc); 2020 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2021 enum pipe pipe = crtc->pipe; 2022 2023 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 2024 return; 2025 2026 i9xx_configure_cpu_transcoder(new_crtc_state); 2027 2028 intel_set_pipe_src_size(new_crtc_state); 2029 2030 intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0); 2031 2032 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 2033 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY); 2034 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0); 2035 } 2036 2037 crtc->active = true; 2038 2039 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 2040 2041 intel_encoders_pre_pll_enable(state, crtc); 2042 2043 if (IS_CHERRYVIEW(dev_priv)) 2044 chv_enable_pll(new_crtc_state); 2045 else 2046 vlv_enable_pll(new_crtc_state); 2047 2048 intel_encoders_pre_enable(state, crtc); 2049 2050 i9xx_pfit_enable(new_crtc_state); 2051 2052 intel_color_load_luts(new_crtc_state); 2053 intel_color_commit_noarm(new_crtc_state); 2054 intel_color_commit_arm(new_crtc_state); 2055 /* update DSPCNTR to configure gamma for pipe bottom color */ 2056 intel_disable_primary_plane(new_crtc_state); 2057 2058 intel_initial_watermarks(state, crtc); 2059 intel_enable_transcoder(new_crtc_state); 2060 2061 intel_crtc_vblank_on(new_crtc_state); 2062 2063 intel_encoders_enable(state, crtc); 2064 } 2065 2066 static void i9xx_crtc_enable(struct intel_atomic_state *state, 2067 struct intel_crtc *crtc) 2068 { 2069 const struct intel_crtc_state *new_crtc_state = 2070 intel_atomic_get_new_crtc_state(state, crtc); 2071 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2072 enum pipe pipe = crtc->pipe; 2073 2074 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 2075 return; 2076 2077 i9xx_configure_cpu_transcoder(new_crtc_state); 2078 2079 intel_set_pipe_src_size(new_crtc_state); 2080 2081 crtc->active = true; 2082 2083 if (DISPLAY_VER(dev_priv) != 2) 2084 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 2085 2086 intel_encoders_pre_enable(state, crtc); 2087 2088 i9xx_enable_pll(new_crtc_state); 2089 2090 i9xx_pfit_enable(new_crtc_state); 2091 2092 intel_color_load_luts(new_crtc_state); 2093 intel_color_commit_noarm(new_crtc_state); 2094 intel_color_commit_arm(new_crtc_state); 2095 /* update DSPCNTR to configure gamma for pipe bottom color */ 2096 intel_disable_primary_plane(new_crtc_state); 2097 2098 if 
(!intel_initial_watermarks(state, crtc)) 2099 intel_update_watermarks(dev_priv); 2100 intel_enable_transcoder(new_crtc_state); 2101 2102 intel_crtc_vblank_on(new_crtc_state); 2103 2104 intel_encoders_enable(state, crtc); 2105 2106 /* prevents spurious underruns */ 2107 if (DISPLAY_VER(dev_priv) == 2) 2108 intel_crtc_wait_for_next_vblank(crtc); 2109 } 2110 2111 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) 2112 { 2113 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 2114 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2115 2116 if (!old_crtc_state->gmch_pfit.control) 2117 return; 2118 2119 assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder); 2120 2121 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n", 2122 intel_de_read(dev_priv, PFIT_CONTROL)); 2123 intel_de_write(dev_priv, PFIT_CONTROL, 0); 2124 } 2125 2126 static void i9xx_crtc_disable(struct intel_atomic_state *state, 2127 struct intel_crtc *crtc) 2128 { 2129 struct intel_crtc_state *old_crtc_state = 2130 intel_atomic_get_old_crtc_state(state, crtc); 2131 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2132 enum pipe pipe = crtc->pipe; 2133 2134 /* 2135 * On gen2 planes are double buffered but the pipe isn't, so we must 2136 * wait for planes to fully turn off before disabling the pipe. 2137 */ 2138 if (DISPLAY_VER(dev_priv) == 2) 2139 intel_crtc_wait_for_next_vblank(crtc); 2140 2141 intel_encoders_disable(state, crtc); 2142 2143 intel_crtc_vblank_off(old_crtc_state); 2144 2145 intel_disable_transcoder(old_crtc_state); 2146 2147 i9xx_pfit_disable(old_crtc_state); 2148 2149 intel_encoders_post_disable(state, crtc); 2150 2151 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { 2152 if (IS_CHERRYVIEW(dev_priv)) 2153 chv_disable_pll(dev_priv, pipe); 2154 else if (IS_VALLEYVIEW(dev_priv)) 2155 vlv_disable_pll(dev_priv, pipe); 2156 else 2157 i9xx_disable_pll(old_crtc_state); 2158 } 2159 2160 intel_encoders_post_pll_disable(state, crtc); 2161 2162 if (DISPLAY_VER(dev_priv) != 2) 2163 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 2164 2165 if (!dev_priv->display.funcs.wm->initial_watermarks) 2166 intel_update_watermarks(dev_priv); 2167 2168 /* clock the pipe down to 640x480@60 to potentially save power */ 2169 if (IS_I830(dev_priv)) 2170 i830_enable_pipe(dev_priv, pipe); 2171 } 2172 2173 void intel_encoder_destroy(struct drm_encoder *encoder) 2174 { 2175 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 2176 2177 drm_encoder_cleanup(encoder); 2178 kfree(intel_encoder); 2179 } 2180 2181 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 2182 { 2183 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2184 2185 /* GDG double wide on either pipe, otherwise pipe A only */ 2186 return DISPLAY_VER(dev_priv) < 4 && 2187 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 2188 } 2189 2190 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state) 2191 { 2192 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock; 2193 struct drm_rect src; 2194 2195 /* 2196 * We only use IF-ID interlacing. If we ever use 2197 * PF-ID we'll need to adjust the pixel_rate here. 
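 *
 * (Roughly speaking, the intel_adjusted_rate() call below scales the
 * dotclock up by the pfit src/dst size ratio when the panel fitter
 * downscales, since the pipe then has to fetch more pixels per output
 * pixel.)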
2198 */ 2199 2200 if (!crtc_state->pch_pfit.enabled) 2201 return pixel_rate; 2202 2203 drm_rect_init(&src, 0, 0, 2204 drm_rect_width(&crtc_state->pipe_src) << 16, 2205 drm_rect_height(&crtc_state->pipe_src) << 16); 2206 2207 return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst, 2208 pixel_rate); 2209 } 2210 2211 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode, 2212 const struct drm_display_mode *timings) 2213 { 2214 mode->hdisplay = timings->crtc_hdisplay; 2215 mode->htotal = timings->crtc_htotal; 2216 mode->hsync_start = timings->crtc_hsync_start; 2217 mode->hsync_end = timings->crtc_hsync_end; 2218 2219 mode->vdisplay = timings->crtc_vdisplay; 2220 mode->vtotal = timings->crtc_vtotal; 2221 mode->vsync_start = timings->crtc_vsync_start; 2222 mode->vsync_end = timings->crtc_vsync_end; 2223 2224 mode->flags = timings->flags; 2225 mode->type = DRM_MODE_TYPE_DRIVER; 2226 2227 mode->clock = timings->crtc_clock; 2228 2229 drm_mode_set_name(mode); 2230 } 2231 2232 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 2233 { 2234 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 2235 2236 if (HAS_GMCH(dev_priv)) 2237 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 2238 crtc_state->pixel_rate = 2239 crtc_state->hw.pipe_mode.crtc_clock; 2240 else 2241 crtc_state->pixel_rate = 2242 ilk_pipe_pixel_rate(crtc_state); 2243 } 2244 2245 static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state, 2246 struct drm_display_mode *mode) 2247 { 2248 int num_pipes = intel_bigjoiner_num_pipes(crtc_state); 2249 2250 if (num_pipes < 2) 2251 return; 2252 2253 mode->crtc_clock /= num_pipes; 2254 mode->crtc_hdisplay /= num_pipes; 2255 mode->crtc_hblank_start /= num_pipes; 2256 mode->crtc_hblank_end /= num_pipes; 2257 mode->crtc_hsync_start /= num_pipes; 2258 mode->crtc_hsync_end /= num_pipes; 2259 mode->crtc_htotal /= num_pipes; 2260 } 2261 2262 static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state, 2263 struct drm_display_mode *mode) 2264 { 2265 int overlap = crtc_state->splitter.pixel_overlap; 2266 int n = crtc_state->splitter.link_count; 2267 2268 if (!crtc_state->splitter.enable) 2269 return; 2270 2271 /* 2272 * eDP MSO uses segment timings from EDID for transcoder 2273 * timings, but full mode for everything else. 2274 * 2275 * h_full = (h_segment - pixel_overlap) * link_count 2276 */ 2277 mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n; 2278 mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n; 2279 mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n; 2280 mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n; 2281 mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n; 2282 mode->crtc_htotal = (mode->crtc_htotal - overlap) * n; 2283 mode->crtc_clock *= n; 2284 } 2285 2286 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state) 2287 { 2288 struct drm_display_mode *mode = &crtc_state->hw.mode; 2289 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; 2290 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2291 2292 /* 2293 * Start with the adjusted_mode crtc timings, which 2294 * have been filled with the transcoder timings. 
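 * The code below then derives three variants from them: pipe_mode
 * (per-pipe crtc timings), the adjusted_mode normal timings (full
 * numbers) and the user-visible mode.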
2295 */ 2296 drm_mode_copy(pipe_mode, adjusted_mode); 2297 2298 /* Expand MSO per-segment transcoder timings to full */ 2299 intel_splitter_adjust_timings(crtc_state, pipe_mode); 2300 2301 /* 2302 * We want the full numbers in adjusted_mode normal timings, 2303 * adjusted_mode crtc timings are left with the raw transcoder 2304 * timings. 2305 */ 2306 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode); 2307 2308 /* Populate the "user" mode with full numbers */ 2309 drm_mode_copy(mode, pipe_mode); 2310 intel_mode_from_crtc_timings(mode, mode); 2311 mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) * 2312 (intel_bigjoiner_num_pipes(crtc_state) ?: 1); 2313 mode->vdisplay = drm_rect_height(&crtc_state->pipe_src); 2314 2315 /* Derive per-pipe timings in case bigjoiner is used */ 2316 intel_bigjoiner_adjust_timings(crtc_state, pipe_mode); 2317 intel_mode_from_crtc_timings(pipe_mode, pipe_mode); 2318 2319 intel_crtc_compute_pixel_rate(crtc_state); 2320 } 2321 2322 void intel_encoder_get_config(struct intel_encoder *encoder, 2323 struct intel_crtc_state *crtc_state) 2324 { 2325 encoder->get_config(encoder, crtc_state); 2326 2327 intel_crtc_readout_derived_state(crtc_state); 2328 } 2329 2330 static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state) 2331 { 2332 int num_pipes = intel_bigjoiner_num_pipes(crtc_state); 2333 int width, height; 2334 2335 if (num_pipes < 2) 2336 return; 2337 2338 width = drm_rect_width(&crtc_state->pipe_src); 2339 height = drm_rect_height(&crtc_state->pipe_src); 2340 2341 drm_rect_init(&crtc_state->pipe_src, 0, 0, 2342 width / num_pipes, height); 2343 } 2344 2345 static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state) 2346 { 2347 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2348 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2349 2350 intel_bigjoiner_compute_pipe_src(crtc_state); 2351 2352 /* 2353 * Pipe horizontal size must be even in: 2354 * - DVO ganged mode 2355 * - LVDS dual channel mode 2356 * - Double wide pipe 2357 */ 2358 if (drm_rect_width(&crtc_state->pipe_src) & 1) { 2359 if (crtc_state->double_wide) { 2360 drm_dbg_kms(&i915->drm, 2361 "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n", 2362 crtc->base.base.id, crtc->base.name); 2363 return -EINVAL; 2364 } 2365 2366 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 2367 intel_is_dual_link_lvds(i915)) { 2368 drm_dbg_kms(&i915->drm, 2369 "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n", 2370 crtc->base.base.id, crtc->base.name); 2371 return -EINVAL; 2372 } 2373 } 2374 2375 return 0; 2376 } 2377 2378 static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) 2379 { 2380 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2381 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2382 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2383 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; 2384 int clock_limit = i915->max_dotclk_freq; 2385 2386 /* 2387 * Start with the adjusted_mode crtc timings, which 2388 * have been filled with the transcoder timings. 
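 *
 * As an illustration, with hypothetical numbers: a 2-link eDP MSO
 * panel with 8 pixels of overlap and 1928 pixel wide segments is
 * expanded below to a (1928 - 8) * 2 = 3840 pixel wide pipe_mode.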
2389 */ 2390 drm_mode_copy(pipe_mode, adjusted_mode); 2391 2392 /* Expand MSO per-segment transcoder timings to full */ 2393 intel_splitter_adjust_timings(crtc_state, pipe_mode); 2394 2395 /* Derive per-pipe timings in case bigjoiner is used */ 2396 intel_bigjoiner_adjust_timings(crtc_state, pipe_mode); 2397 intel_mode_from_crtc_timings(pipe_mode, pipe_mode); 2398 2399 if (DISPLAY_VER(i915) < 4) { 2400 clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10; 2401 2402 /* 2403 * Enable double wide mode when the dot clock 2404 * is > 90% of the (display) core speed. 2405 */ 2406 if (intel_crtc_supports_double_wide(crtc) && 2407 pipe_mode->crtc_clock > clock_limit) { 2408 clock_limit = i915->max_dotclk_freq; 2409 crtc_state->double_wide = true; 2410 } 2411 } 2412 2413 if (pipe_mode->crtc_clock > clock_limit) { 2414 drm_dbg_kms(&i915->drm, 2415 "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 2416 crtc->base.base.id, crtc->base.name, 2417 pipe_mode->crtc_clock, clock_limit, 2418 str_yes_no(crtc_state->double_wide)); 2419 return -EINVAL; 2420 } 2421 2422 return 0; 2423 } 2424 2425 static int intel_crtc_compute_config(struct intel_atomic_state *state, 2426 struct intel_crtc *crtc) 2427 { 2428 struct intel_crtc_state *crtc_state = 2429 intel_atomic_get_new_crtc_state(state, crtc); 2430 int ret; 2431 2432 ret = intel_dpll_crtc_compute_clock(state, crtc); 2433 if (ret) 2434 return ret; 2435 2436 ret = intel_crtc_compute_pipe_src(crtc_state); 2437 if (ret) 2438 return ret; 2439 2440 ret = intel_crtc_compute_pipe_mode(crtc_state); 2441 if (ret) 2442 return ret; 2443 2444 intel_crtc_compute_pixel_rate(crtc_state); 2445 2446 if (crtc_state->has_pch_encoder) 2447 return ilk_fdi_compute_config(crtc, crtc_state); 2448 2449 return 0; 2450 } 2451 2452 static void 2453 intel_reduce_m_n_ratio(u32 *num, u32 *den) 2454 { 2455 while (*num > DATA_LINK_M_N_MASK || 2456 *den > DATA_LINK_M_N_MASK) { 2457 *num >>= 1; 2458 *den >>= 1; 2459 } 2460 } 2461 2462 static void compute_m_n(u32 *ret_m, u32 *ret_n, 2463 u32 m, u32 n, u32 constant_n) 2464 { 2465 if (constant_n) 2466 *ret_n = constant_n; 2467 else 2468 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 2469 2470 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); 2471 intel_reduce_m_n_ratio(ret_m, ret_n); 2472 } 2473 2474 void 2475 intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes, 2476 int pixel_clock, int link_clock, 2477 int bw_overhead, 2478 struct intel_link_m_n *m_n) 2479 { 2480 u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock); 2481 u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16, 2482 bw_overhead); 2483 u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes); 2484 2485 /* 2486 * Windows/BIOS uses fixed M/N values always. Follow suit. 2487 * 2488 * Also several DP dongles in particular seem to be fussy 2489 * about too large link M/N values. Presumably the 20bit 2490 * value used by Windows/BIOS is acceptable to everyone. 2491 */ 2492 m_n->tu = 64; 2493 compute_m_n(&m_n->data_m, &m_n->data_n, 2494 data_m, data_n, 2495 0x8000000); 2496 2497 compute_m_n(&m_n->link_m, &m_n->link_n, 2498 pixel_clock, link_symbol_clock, 2499 0x80000); 2500 } 2501 2502 void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) 2503 { 2504 /* 2505 * There may be no VBT; and if the BIOS enabled SSC we can 2506 * just keep using it to avoid unnecessary flicker. Whereas if the 2507 * BIOS isn't using it, don't assume it will work even if the VBT 2508 * indicates as much. 
 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    str_enabled_disabled(bios_lvds_use_ssc),
				    str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
			dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}

void intel_zero_m_n(struct intel_link_m_n *m_n)
{
	/* corresponds to 0 register value */
	memset(m_n, 0, sizeof(*m_n));
	m_n->tu = 1;
}

void intel_set_m_n(struct drm_i915_private *i915,
		   const struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
	intel_de_write(i915, data_n_reg, m_n->data_n);
	intel_de_write(i915, link_m_reg, m_n->link_m);
	/*
	 * On BDW+ writing LINK_N arms the double buffered update
	 * of all the M/N registers, so it must be written last.
	 */
	intel_de_write(i915, link_n_reg, m_n->link_n);
}

bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
				    enum transcoder transcoder)
{
	if (IS_HASWELL(dev_priv))
		return transcoder == TRANSCODER_EDP;

	return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
}

void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5)
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
			      PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
	else
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}

void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
		return;

	intel_set_m_n(dev_priv, m_n,
		      PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
		      PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
}

static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch.
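	 * Hence the vtotal/vblank tweaks below are done on local copies
	 * only.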
*/ 2599 crtc_vdisplay = adjusted_mode->crtc_vdisplay; 2600 crtc_vtotal = adjusted_mode->crtc_vtotal; 2601 crtc_vblank_start = adjusted_mode->crtc_vblank_start; 2602 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 2603 2604 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 2605 /* the chip adds 2 halflines automatically */ 2606 crtc_vtotal -= 1; 2607 crtc_vblank_end -= 1; 2608 2609 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 2610 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 2611 else 2612 vsyncshift = adjusted_mode->crtc_hsync_start - 2613 adjusted_mode->crtc_htotal / 2; 2614 if (vsyncshift < 0) 2615 vsyncshift += adjusted_mode->crtc_htotal; 2616 } 2617 2618 /* 2619 * VBLANK_START no longer works on ADL+, instead we must use 2620 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start. 2621 */ 2622 if (DISPLAY_VER(dev_priv) >= 13) { 2623 intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder), 2624 crtc_vblank_start - crtc_vdisplay); 2625 2626 /* 2627 * VBLANK_START not used by hw, just clear it 2628 * to make it stand out in register dumps. 2629 */ 2630 crtc_vblank_start = 1; 2631 } 2632 2633 if (DISPLAY_VER(dev_priv) >= 4) 2634 intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder), 2635 vsyncshift); 2636 2637 intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder), 2638 HACTIVE(adjusted_mode->crtc_hdisplay - 1) | 2639 HTOTAL(adjusted_mode->crtc_htotal - 1)); 2640 intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder), 2641 HBLANK_START(adjusted_mode->crtc_hblank_start - 1) | 2642 HBLANK_END(adjusted_mode->crtc_hblank_end - 1)); 2643 intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder), 2644 HSYNC_START(adjusted_mode->crtc_hsync_start - 1) | 2645 HSYNC_END(adjusted_mode->crtc_hsync_end - 1)); 2646 2647 intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder), 2648 VACTIVE(crtc_vdisplay - 1) | 2649 VTOTAL(crtc_vtotal - 1)); 2650 intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), 2651 VBLANK_START(crtc_vblank_start - 1) | 2652 VBLANK_END(crtc_vblank_end - 1)); 2653 intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder), 2654 VSYNC_START(adjusted_mode->crtc_vsync_start - 1) | 2655 VSYNC_END(adjusted_mode->crtc_vsync_end - 1)); 2656 2657 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 2658 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 2659 * documented on the DDI_FUNC_CTL register description, EDP Input Select 2660 * bits. */ 2661 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 2662 (pipe == PIPE_B || pipe == PIPE_C)) 2663 intel_de_write(dev_priv, TRANS_VTOTAL(pipe), 2664 VACTIVE(crtc_vdisplay - 1) | 2665 VTOTAL(crtc_vtotal - 1)); 2666 } 2667 2668 static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state) 2669 { 2670 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2671 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2672 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2673 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2674 u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end; 2675 2676 crtc_vdisplay = adjusted_mode->crtc_vdisplay; 2677 crtc_vtotal = adjusted_mode->crtc_vtotal; 2678 crtc_vblank_start = adjusted_mode->crtc_vblank_start; 2679 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 2680 2681 drm_WARN_ON(&dev_priv->drm, adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE); 2682 2683 /* 2684 * The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode. 
2685 * But let's write it anyway to keep the state checker happy. 2686 */ 2687 intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), 2688 VBLANK_START(crtc_vblank_start - 1) | 2689 VBLANK_END(crtc_vblank_end - 1)); 2690 /* 2691 * The double buffer latch point for TRANS_VTOTAL 2692 * is the transcoder's undelayed vblank. 2693 */ 2694 intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder), 2695 VACTIVE(crtc_vdisplay - 1) | 2696 VTOTAL(crtc_vtotal - 1)); 2697 } 2698 2699 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 2700 { 2701 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2702 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2703 int width = drm_rect_width(&crtc_state->pipe_src); 2704 int height = drm_rect_height(&crtc_state->pipe_src); 2705 enum pipe pipe = crtc->pipe; 2706 2707 /* pipesrc controls the size that is scaled from, which should 2708 * always be the user's requested size. 2709 */ 2710 intel_de_write(dev_priv, PIPESRC(pipe), 2711 PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1)); 2712 2713 if (!crtc_state->enable_psr2_su_region_et) 2714 return; 2715 2716 width = drm_rect_width(&crtc_state->psr2_su_area); 2717 height = drm_rect_height(&crtc_state->psr2_su_area); 2718 2719 intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe), 2720 PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1)); 2721 } 2722 2723 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) 2724 { 2725 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 2726 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2727 2728 if (DISPLAY_VER(dev_priv) == 2) 2729 return false; 2730 2731 if (DISPLAY_VER(dev_priv) >= 9 || 2732 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2733 return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW; 2734 else 2735 return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK; 2736 } 2737 2738 static void intel_get_transcoder_timings(struct intel_crtc *crtc, 2739 struct intel_crtc_state *pipe_config) 2740 { 2741 struct drm_device *dev = crtc->base.dev; 2742 struct drm_i915_private *dev_priv = to_i915(dev); 2743 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 2744 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2745 u32 tmp; 2746 2747 tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder)); 2748 adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1; 2749 adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1; 2750 2751 if (!transcoder_is_dsi(cpu_transcoder)) { 2752 tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder)); 2753 adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1; 2754 adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1; 2755 } 2756 2757 tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder)); 2758 adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1; 2759 adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1; 2760 2761 tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)); 2762 adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1; 2763 adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1; 2764 2765 /* FIXME TGL+ DSI transcoders have this! 
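	 * (i.e. TRANS_VBLANK does exist on those, making the
	 * !transcoder_is_dsi() check below overly conservative there.)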
*/ 2766 if (!transcoder_is_dsi(cpu_transcoder)) { 2767 tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)); 2768 adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1; 2769 adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1; 2770 } 2771 tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)); 2772 adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1; 2773 adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1; 2774 2775 if (intel_pipe_is_interlaced(pipe_config)) { 2776 adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE; 2777 adjusted_mode->crtc_vtotal += 1; 2778 adjusted_mode->crtc_vblank_end += 1; 2779 } 2780 2781 if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder)) 2782 adjusted_mode->crtc_vblank_start = 2783 adjusted_mode->crtc_vdisplay + 2784 intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder)); 2785 } 2786 2787 static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state) 2788 { 2789 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2790 int num_pipes = intel_bigjoiner_num_pipes(crtc_state); 2791 enum pipe master_pipe, pipe = crtc->pipe; 2792 int width; 2793 2794 if (num_pipes < 2) 2795 return; 2796 2797 master_pipe = bigjoiner_master_pipe(crtc_state); 2798 width = drm_rect_width(&crtc_state->pipe_src); 2799 2800 drm_rect_translate_to(&crtc_state->pipe_src, 2801 (pipe - master_pipe) * width, 0); 2802 } 2803 2804 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 2805 struct intel_crtc_state *pipe_config) 2806 { 2807 struct drm_device *dev = crtc->base.dev; 2808 struct drm_i915_private *dev_priv = to_i915(dev); 2809 u32 tmp; 2810 2811 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe)); 2812 2813 drm_rect_init(&pipe_config->pipe_src, 0, 0, 2814 REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1, 2815 REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1); 2816 2817 intel_bigjoiner_adjust_pipe_src(pipe_config); 2818 } 2819 2820 void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 2821 { 2822 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2823 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2824 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2825 u32 val = 0; 2826 2827 /* 2828 * - We keep both pipes enabled on 830 2829 * - During modeset the pipe is still disabled and must remain so 2830 * - During fastset the pipe is already enabled and must remain so 2831 */ 2832 if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state)) 2833 val |= TRANSCONF_ENABLE; 2834 2835 if (crtc_state->double_wide) 2836 val |= TRANSCONF_DOUBLE_WIDE; 2837 2838 /* only g4x and later have fancy bpc/dither controls */ 2839 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2840 IS_CHERRYVIEW(dev_priv)) { 2841 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 2842 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 2843 val |= TRANSCONF_DITHER_EN | 2844 TRANSCONF_DITHER_TYPE_SP; 2845 2846 switch (crtc_state->pipe_bpp) { 2847 default: 2848 /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ 2849 MISSING_CASE(crtc_state->pipe_bpp); 2850 fallthrough; 2851 case 18: 2852 val |= TRANSCONF_BPC_6; 2853 break; 2854 case 24: 2855 val |= TRANSCONF_BPC_8; 2856 break; 2857 case 30: 2858 val |= TRANSCONF_BPC_10; 2859 break; 2860 } 2861 } 2862 2863 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 2864 if (DISPLAY_VER(dev_priv) < 4 || 2865 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 2866 val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION; 2867 else 2868 val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT; 2869 } else { 2870 val |= TRANSCONF_INTERLACE_PROGRESSIVE; 2871 } 2872 2873 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 2874 crtc_state->limited_color_range) 2875 val |= TRANSCONF_COLOR_RANGE_SELECT; 2876 2877 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 2878 2879 if (crtc_state->wgc_enable) 2880 val |= TRANSCONF_WGC_ENABLE; 2881 2882 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 2883 2884 intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); 2885 intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); 2886 } 2887 2888 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 2889 { 2890 if (IS_I830(dev_priv)) 2891 return false; 2892 2893 return DISPLAY_VER(dev_priv) >= 4 || 2894 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 2895 } 2896 2897 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) 2898 { 2899 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2900 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2901 enum pipe pipe; 2902 u32 tmp; 2903 2904 if (!i9xx_has_pfit(dev_priv)) 2905 return; 2906 2907 tmp = intel_de_read(dev_priv, PFIT_CONTROL); 2908 if (!(tmp & PFIT_ENABLE)) 2909 return; 2910 2911 /* Check whether the pfit is attached to our pipe. 
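	 * On gen4+ PFIT_CONTROL has a pipe select field for this; older
	 * mobile platforms have the panel fitter hardwired to pipe B.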
*/ 2912 if (DISPLAY_VER(dev_priv) >= 4) 2913 pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp); 2914 else 2915 pipe = PIPE_B; 2916 2917 if (pipe != crtc->pipe) 2918 return; 2919 2920 crtc_state->gmch_pfit.control = tmp; 2921 crtc_state->gmch_pfit.pgm_ratios = 2922 intel_de_read(dev_priv, PFIT_PGM_RATIOS); 2923 } 2924 2925 static enum intel_output_format 2926 bdw_get_pipe_misc_output_format(struct intel_crtc *crtc) 2927 { 2928 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2929 u32 tmp; 2930 2931 tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); 2932 2933 if (tmp & PIPE_MISC_YUV420_ENABLE) { 2934 /* We support 4:2:0 in full blend mode only */ 2935 drm_WARN_ON(&dev_priv->drm, 2936 (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0); 2937 2938 return INTEL_OUTPUT_FORMAT_YCBCR420; 2939 } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) { 2940 return INTEL_OUTPUT_FORMAT_YCBCR444; 2941 } else { 2942 return INTEL_OUTPUT_FORMAT_RGB; 2943 } 2944 } 2945 2946 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 2947 struct intel_crtc_state *pipe_config) 2948 { 2949 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2950 enum intel_display_power_domain power_domain; 2951 intel_wakeref_t wakeref; 2952 u32 tmp; 2953 bool ret; 2954 2955 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 2956 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 2957 if (!wakeref) 2958 return false; 2959 2960 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 2961 pipe_config->sink_format = pipe_config->output_format; 2962 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 2963 pipe_config->shared_dpll = NULL; 2964 2965 ret = false; 2966 2967 tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); 2968 if (!(tmp & TRANSCONF_ENABLE)) 2969 goto out; 2970 2971 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2972 IS_CHERRYVIEW(dev_priv)) { 2973 switch (tmp & TRANSCONF_BPC_MASK) { 2974 case TRANSCONF_BPC_6: 2975 pipe_config->pipe_bpp = 18; 2976 break; 2977 case TRANSCONF_BPC_8: 2978 pipe_config->pipe_bpp = 24; 2979 break; 2980 case TRANSCONF_BPC_10: 2981 pipe_config->pipe_bpp = 30; 2982 break; 2983 default: 2984 MISSING_CASE(tmp); 2985 break; 2986 } 2987 } 2988 2989 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 2990 (tmp & TRANSCONF_COLOR_RANGE_SELECT)) 2991 pipe_config->limited_color_range = true; 2992 2993 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp); 2994 2995 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 2996 2997 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 2998 (tmp & TRANSCONF_WGC_ENABLE)) 2999 pipe_config->wgc_enable = true; 3000 3001 intel_color_get_config(pipe_config); 3002 3003 if (DISPLAY_VER(dev_priv) < 4) 3004 pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE; 3005 3006 intel_get_transcoder_timings(crtc, pipe_config); 3007 intel_get_pipe_src_size(crtc, pipe_config); 3008 3009 i9xx_get_pfit_config(pipe_config); 3010 3011 if (DISPLAY_VER(dev_priv) >= 4) { 3012 /* No way to read it out on pipes B and C */ 3013 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 3014 tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe]; 3015 else 3016 tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe)); 3017 pipe_config->pixel_multiplier = 3018 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 3019 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 3020 pipe_config->dpll_hw_state.dpll_md = tmp; 3021 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 3022 IS_G33(dev_priv) || 
IS_PINEVIEW(dev_priv)) { 3023 tmp = intel_de_read(dev_priv, DPLL(crtc->pipe)); 3024 pipe_config->pixel_multiplier = 3025 ((tmp & SDVO_MULTIPLIER_MASK) 3026 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 3027 } else { 3028 /* Note that on i915G/GM the pixel multiplier is in the sdvo 3029 * port and will be fixed up in the encoder->get_config 3030 * function. */ 3031 pipe_config->pixel_multiplier = 1; 3032 } 3033 pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv, 3034 DPLL(crtc->pipe)); 3035 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 3036 pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv, 3037 FP0(crtc->pipe)); 3038 pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv, 3039 FP1(crtc->pipe)); 3040 } else { 3041 /* Mask out read-only status bits. */ 3042 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 3043 DPLL_PORTC_READY_MASK | 3044 DPLL_PORTB_READY_MASK); 3045 } 3046 3047 if (IS_CHERRYVIEW(dev_priv)) 3048 chv_crtc_clock_get(crtc, pipe_config); 3049 else if (IS_VALLEYVIEW(dev_priv)) 3050 vlv_crtc_clock_get(crtc, pipe_config); 3051 else 3052 i9xx_crtc_clock_get(crtc, pipe_config); 3053 3054 /* 3055 * Normally the dotclock is filled in by the encoder .get_config() 3056 * but in case the pipe is enabled w/o any ports we need a sane 3057 * default. 3058 */ 3059 pipe_config->hw.adjusted_mode.crtc_clock = 3060 pipe_config->port_clock / pipe_config->pixel_multiplier; 3061 3062 ret = true; 3063 3064 out: 3065 intel_display_power_put(dev_priv, power_domain, wakeref); 3066 3067 return ret; 3068 } 3069 3070 void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) 3071 { 3072 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3073 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3074 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3075 u32 val = 0; 3076 3077 /* 3078 * - During modeset the pipe is still disabled and must remain so 3079 * - During fastset the pipe is already enabled and must remain so 3080 */ 3081 if (!intel_crtc_needs_modeset(crtc_state)) 3082 val |= TRANSCONF_ENABLE; 3083 3084 switch (crtc_state->pipe_bpp) { 3085 default: 3086 /* Case prevented by intel_choose_pipe_bpp_dither. */ 3087 MISSING_CASE(crtc_state->pipe_bpp); 3088 fallthrough; 3089 case 18: 3090 val |= TRANSCONF_BPC_6; 3091 break; 3092 case 24: 3093 val |= TRANSCONF_BPC_8; 3094 break; 3095 case 30: 3096 val |= TRANSCONF_BPC_10; 3097 break; 3098 case 36: 3099 val |= TRANSCONF_BPC_12; 3100 break; 3101 } 3102 3103 if (crtc_state->dither) 3104 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3105 3106 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3107 val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3108 else 3109 val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3110 3111 /* 3112 * This would end up with an odd purple hue over 3113 * the entire display. Make sure we don't do it. 
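	 * (TRANSCONF_COLOR_RANGE_SELECT only produces sensible results
	 * for RGB output, hence the WARN below.)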
3114 */ 3115 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && 3116 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 3117 3118 if (crtc_state->limited_color_range && 3119 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3120 val |= TRANSCONF_COLOR_RANGE_SELECT; 3121 3122 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3123 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709; 3124 3125 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 3126 3127 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3128 val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); 3129 3130 intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); 3131 intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); 3132 } 3133 3134 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) 3135 { 3136 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3137 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3138 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3139 u32 val = 0; 3140 3141 /* 3142 * - During modeset the pipe is still disabled and must remain so 3143 * - During fastset the pipe is already enabled and must remain so 3144 */ 3145 if (!intel_crtc_needs_modeset(crtc_state)) 3146 val |= TRANSCONF_ENABLE; 3147 3148 if (IS_HASWELL(dev_priv) && crtc_state->dither) 3149 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3150 3151 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3152 val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3153 else 3154 val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3155 3156 if (IS_HASWELL(dev_priv) && 3157 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3158 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW; 3159 3160 intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); 3161 intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); 3162 } 3163 3164 static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state) 3165 { 3166 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3167 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3168 u32 val = 0; 3169 3170 switch (crtc_state->pipe_bpp) { 3171 case 18: 3172 val |= PIPE_MISC_BPC_6; 3173 break; 3174 case 24: 3175 val |= PIPE_MISC_BPC_8; 3176 break; 3177 case 30: 3178 val |= PIPE_MISC_BPC_10; 3179 break; 3180 case 36: 3181 /* Port output 12BPC defined for ADLP+ */ 3182 if (DISPLAY_VER(dev_priv) >= 13) 3183 val |= PIPE_MISC_BPC_12_ADLP; 3184 break; 3185 default: 3186 MISSING_CASE(crtc_state->pipe_bpp); 3187 break; 3188 } 3189 3190 if (crtc_state->dither) 3191 val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP; 3192 3193 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 3194 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 3195 val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV; 3196 3197 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 3198 val |= PIPE_MISC_YUV420_ENABLE | 3199 PIPE_MISC_YUV420_MODE_FULL_BLEND; 3200 3201 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state)) 3202 val |= PIPE_MISC_HDR_MODE_PRECISION; 3203 3204 if (DISPLAY_VER(dev_priv) >= 12) 3205 val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC; 3206 3207 /* allow PSR with sprite enabled */ 3208 if (IS_BROADWELL(dev_priv)) 3209 val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE; 3210 3211 intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val); 3212 } 3213 3214 int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) 3215 { 3216 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3217 u32 tmp; 3218 3219 tmp = 
intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); 3220 3221 switch (tmp & PIPE_MISC_BPC_MASK) { 3222 case PIPE_MISC_BPC_6: 3223 return 18; 3224 case PIPE_MISC_BPC_8: 3225 return 24; 3226 case PIPE_MISC_BPC_10: 3227 return 30; 3228 /* 3229 * PORT OUTPUT 12 BPC defined for ADLP+. 3230 * 3231 * TODO: 3232 * For previous platforms with DSI interface, bits 5:7 3233 * are used for storing pipe_bpp irrespective of dithering. 3234 * Since the value of 12 BPC is not defined for these bits 3235 * on older platforms, need to find a workaround for 12 BPC 3236 * MIPI DSI HW readout. 3237 */ 3238 case PIPE_MISC_BPC_12_ADLP: 3239 if (DISPLAY_VER(dev_priv) >= 13) 3240 return 36; 3241 fallthrough; 3242 default: 3243 MISSING_CASE(tmp); 3244 return 0; 3245 } 3246 } 3247 3248 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) 3249 { 3250 /* 3251 * Account for spread spectrum to avoid 3252 * oversubscribing the link. Max center spread 3253 * is 2.5%; use 5% for safety's sake. 3254 */ 3255 u32 bps = target_clock * bpp * 21 / 20; 3256 return DIV_ROUND_UP(bps, link_bw * 8); 3257 } 3258 3259 void intel_get_m_n(struct drm_i915_private *i915, 3260 struct intel_link_m_n *m_n, 3261 i915_reg_t data_m_reg, i915_reg_t data_n_reg, 3262 i915_reg_t link_m_reg, i915_reg_t link_n_reg) 3263 { 3264 m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK; 3265 m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK; 3266 m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK; 3267 m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK; 3268 m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1; 3269 } 3270 3271 void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, 3272 enum transcoder transcoder, 3273 struct intel_link_m_n *m_n) 3274 { 3275 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3276 enum pipe pipe = crtc->pipe; 3277 3278 if (DISPLAY_VER(dev_priv) >= 5) 3279 intel_get_m_n(dev_priv, m_n, 3280 PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), 3281 PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); 3282 else 3283 intel_get_m_n(dev_priv, m_n, 3284 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), 3285 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); 3286 } 3287 3288 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, 3289 enum transcoder transcoder, 3290 struct intel_link_m_n *m_n) 3291 { 3292 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3293 3294 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) 3295 return; 3296 3297 intel_get_m_n(dev_priv, m_n, 3298 PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), 3299 PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); 3300 } 3301 3302 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state) 3303 { 3304 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3305 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3306 u32 ctl, pos, size; 3307 enum pipe pipe; 3308 3309 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe)); 3310 if ((ctl & PF_ENABLE) == 0) 3311 return; 3312 3313 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 3314 pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl); 3315 else 3316 pipe = crtc->pipe; 3317 3318 crtc_state->pch_pfit.enabled = true; 3319 3320 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe)); 3321 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe)); 3322 3323 drm_rect_init(&crtc_state->pch_pfit.dst, 3324 REG_FIELD_GET(PF_WIN_XPOS_MASK, pos), 3325 REG_FIELD_GET(PF_WIN_YPOS_MASK, pos), 3326 
		      REG_FIELD_GET(PF_WIN_XSIZE_MASK, size),
		      REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiate them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe);
}

static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
	if (!(tmp & TRANSCONF_ENABLE))
		goto out;

	switch (tmp & TRANSCONF_BPC_MASK) {
	case TRANSCONF_BPC_6:
		pipe_config->pipe_bpp = 18;
		break;
	case TRANSCONF_BPC_8:
		pipe_config->pipe_bpp = 24;
		break;
	case TRANSCONF_BPC_10:
		pipe_config->pipe_bpp = 30;
		break;
	case TRANSCONF_BPC_12:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
	case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
	case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->sink_format = pipe_config->output_format;

	pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);

	pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;

	pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);

	intel_color_get_config(pipe_config);

	pipe_config->pixel_multiplier = 1;

	ilk_pch_get_config(pipe_config);

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

static u8 bigjoiner_pipes(struct drm_i915_private *i915)
{
	u8 pipes;

	if (DISPLAY_VER(i915) >= 12)
		pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
	else if (DISPLAY_VER(i915) >= 11)
		pipes = BIT(PIPE_B) | BIT(PIPE_C);
	else
		pipes = 0;

	return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask;
}

static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
					   enum transcoder cpu_transcoder)
{
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp = 0;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);

	with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

	return tmp & TRANS_DDI_FUNC_ENABLE;
}

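/*
 * Read out which pipes are currently ganged together for bigjoiner.
 * The hw pairs a master pipe with the slave pipe immediately after it,
 * so e.g. master_pipes == BIT(PIPE_A) | BIT(PIPE_C) must come with
 * slave_pipes == BIT(PIPE_B) | BIT(PIPE_D) (== master_pipes << 1),
 * which is exactly what the drm_WARN() at the end of the function
 * asserts.
 */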
static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv, 3447 u8 *master_pipes, u8 *slave_pipes) 3448 { 3449 struct intel_crtc *crtc; 3450 3451 *master_pipes = 0; 3452 *slave_pipes = 0; 3453 3454 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, 3455 bigjoiner_pipes(dev_priv)) { 3456 enum intel_display_power_domain power_domain; 3457 enum pipe pipe = crtc->pipe; 3458 intel_wakeref_t wakeref; 3459 3460 power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe); 3461 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { 3462 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); 3463 3464 if (!(tmp & BIG_JOINER_ENABLE)) 3465 continue; 3466 3467 if (tmp & MASTER_BIG_JOINER_ENABLE) 3468 *master_pipes |= BIT(pipe); 3469 else 3470 *slave_pipes |= BIT(pipe); 3471 } 3472 3473 if (DISPLAY_VER(dev_priv) < 13) 3474 continue; 3475 3476 power_domain = POWER_DOMAIN_PIPE(pipe); 3477 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { 3478 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); 3479 3480 if (tmp & UNCOMPRESSED_JOINER_MASTER) 3481 *master_pipes |= BIT(pipe); 3482 if (tmp & UNCOMPRESSED_JOINER_SLAVE) 3483 *slave_pipes |= BIT(pipe); 3484 } 3485 } 3486 3487 /* Bigjoiner pipes should always be consecutive master and slave */ 3488 drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1, 3489 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n", 3490 *master_pipes, *slave_pipes); 3491 } 3492 3493 static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes) 3494 { 3495 if ((slave_pipes & BIT(pipe)) == 0) 3496 return pipe; 3497 3498 /* ignore everything above our pipe */ 3499 master_pipes &= ~GENMASK(7, pipe); 3500 3501 /* highest remaining bit should be our master pipe */ 3502 return fls(master_pipes) - 1; 3503 } 3504 3505 static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes) 3506 { 3507 enum pipe master_pipe, next_master_pipe; 3508 3509 master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes); 3510 3511 if ((master_pipes & BIT(master_pipe)) == 0) 3512 return 0; 3513 3514 /* ignore our master pipe and everything below it */ 3515 master_pipes &= ~GENMASK(master_pipe, 0); 3516 /* make sure a high bit is set for the ffs() */ 3517 master_pipes |= BIT(7); 3518 /* lowest remaining bit should be the next master pipe */ 3519 next_master_pipe = ffs(master_pipes) - 1; 3520 3521 return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe); 3522 } 3523 3524 static u8 hsw_panel_transcoders(struct drm_i915_private *i915) 3525 { 3526 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP); 3527 3528 if (DISPLAY_VER(i915) >= 11) 3529 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 3530 3531 return panel_transcoder_mask; 3532 } 3533 3534 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) 3535 { 3536 struct drm_device *dev = crtc->base.dev; 3537 struct drm_i915_private *dev_priv = to_i915(dev); 3538 u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv); 3539 enum transcoder cpu_transcoder; 3540 u8 master_pipes, slave_pipes; 3541 u8 enabled_transcoders = 0; 3542 3543 /* 3544 * XXX: Do intel_display_power_get_if_enabled before reading this (for 3545 * consistency and less surprising code; it's in always on power). 
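 *
 * The strategy below: first check the panel transcoders (eDP/DSI) via
 * the EDP input select in TRANS_DDI_FUNC_CTL, then the transcoder
 * matching our own pipe, and finally the bigjoiner master's transcoder
 * in case we're a slave pipe.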
static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
{
	u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);

	if (DISPLAY_VER(i915) >= 11)
		panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	return panel_transcoder_mask;
}

static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
	enum transcoder cpu_transcoder;
	u8 master_pipes, slave_pipes;
	u8 enabled_transcoders = 0;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always-on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
				       panel_transcoder_mask) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;
		enum pipe trans_pipe;
		u32 tmp = 0;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
			tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(cpu_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe)
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	/* single pipe or bigjoiner master */
	cpu_transcoder = (enum transcoder) crtc->pipe;
	if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
		enabled_transcoders |= BIT(cpu_transcoder);

	/* bigjoiner slave -> consider the master pipe's transcoder as well */
	enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes);
	if (slave_pipes & BIT(crtc->pipe)) {
		cpu_transcoder = (enum transcoder)
			get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes);
		if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	return enabled_transcoders;
}

static bool has_edp_transcoders(u8 enabled_transcoders)
{
	return enabled_transcoders & BIT(TRANSCODER_EDP);
}

static bool has_dsi_transcoders(u8 enabled_transcoders)
{
	return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
				      BIT(TRANSCODER_DSI_1));
}

static bool has_pipe_transcoders(u8 enabled_transcoders)
{
	return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
				       BIT(TRANSCODER_DSI_0) |
				       BIT(TRANSCODER_DSI_1));
}

static void assert_enabled_transcoders(struct drm_i915_private *i915,
				       u8 enabled_transcoders)
{
	/* Only one type of transcoder please */
	drm_WARN_ON(&i915->drm,
		    has_edp_transcoders(enabled_transcoders) +
		    has_dsi_transcoders(enabled_transcoders) +
		    has_pipe_transcoders(enabled_transcoders) > 1);

	/* Only DSI transcoders can be ganged */
	drm_WARN_ON(&i915->drm,
		    !has_dsi_transcoders(enabled_transcoders) &&
		    !is_power_of_2(enabled_transcoders));
}

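/*
 * Illustrative sanity-check cases for the asserts above: ganged DSI, i.e.
 * enabled_transcoders == BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1),
 * passes both WARNs since DSI transcoders may legitimately be paired.
 * By contrast, BIT(TRANSCODER_A) | BIT(TRANSCODER_B) has no DSI bit set
 * and is not a power of two, so it would trip the second WARN.
 */
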
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long enabled_transcoders;
	u32 tmp;

	enabled_transcoders = hsw_enabled_transcoders(crtc);
	if (!enabled_transcoders)
		return false;

	assert_enabled_transcoders(dev_priv, enabled_transcoders);

	/*
	 * With the exception of DSI we should only ever have
	 * a single enabled transcoder. With DSI let's just
	 * pick the first one.
	 */
	pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

		if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
			pipe_config->pch_pfit.force_thru = true;
	}

	tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));

	return tmp & TRANSCONF_ENABLE;
}

static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}

static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	u8 master_pipes, slave_pipes;
	enum pipe pipe = crtc->pipe;

	enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes);

	if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
		return;

	crtc_state->bigjoiner_pipes =
		BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
		get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
}

static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains);

	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	intel_bigjoiner_get_config(pipe_config);
	intel_dsc_get_config(pipe_config);

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(dev_priv) >= 11)
		intel_get_transcoder_timings(crtc, pipe_config);

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					TRANSCONF(pipe_config->cpu_transcoder));

		if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipe_misc_output_format(crtc);
	}

	pipe_config->sink_format = pipe_config->output_format;

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(dev_priv) >= 9)
			skl_scaler_get_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	hsw_ips_get_config(pipe_config);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      TRANS_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder));

		pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
	} else {
		/* no idea if this is correct */
		pipe_config->framestart_delay = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains);

	return active;
}

bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
		return false;

	crtc_state->hw.active = true;

	intel_crtc_readout_derived_state(crtc_state);

	return true;
}

int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock -> pixel clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and for link freq (10 kbit/s units) -> pixel clock it is:
	 * link_symbol_clock = link_freq * 10 / link_symbol_size
	 * pixel_clock = (m * link_symbol_clock) / n
	 * or for more precision:
	 * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size)
	 */

	if (!m_n->link_n)
		return 0;

	return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10),
				m_n->link_n * intel_dp_link_symbol_size(link_freq));
}

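/*
 * Illustrative numbers only: with link_m/link_n = 2/3, an 8b/10b DP link
 * (link symbol size 10) and link_freq = 270000 in 10 kbit/s units
 * (i.e. 2.7 Gbit/s per lane), the formula above works out to
 * 2 * 270000 * 10 / (3 * 10) = 180000 kHz of pixel clock.
 */
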
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
{
	int dotclock;

	if (intel_crtc_has_dp_encoder(pipe_config))
		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
						    &pipe_config->dp_m_n);
	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
		dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
					     pipe_config->pipe_bpp);
	else
		dotclock = pipe_config->port_clock;

	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
	    !intel_crtc_has_dp_encoder(pipe_config))
		dotclock *= 2;

	if (pipe_config->pixel_multiplier)
		dotclock /= pipe_config->pixel_multiplier;

	return dotclock;
}

/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	struct intel_crtc *crtc;
	enum pipe pipe;

	if (!encoder->get_hw_state(encoder, &pipe))
		return NULL;

	crtc = intel_crtc_for_pipe(dev_priv, pipe);

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(mode);
		return NULL;
	}

	if (!intel_crtc_get_pipe_config(crtc_state)) {
		intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
		kfree(mode);
		return NULL;
	}

	intel_encoder_get_config(encoder, crtc_state);

	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);

	intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);

	return mode;
}

static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & BIT(b->type) &&
			  b->cloneable & BIT(a->type));
}

static bool check_single_encoder_cloning(struct intel_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		if (!linked)
			continue;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_linked_plane != plane);
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}

static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (DISPLAY_VER(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
			crtc_state->data_rate[plane->id] = 0;
			crtc_state->rel_data_rate[plane->id] = 0;
		}

		plane_state->planar_slave = false;
	}

	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		crtc_state->data_rate[linked->id] =
			crtc_state->data_rate_y[plane->id];
		crtc_state->rel_data_rate[linked->id] =
			crtc_state->rel_data_rate_y[plane->id];
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		linked_state->decrypt = plane_state->decrypt;

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}

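/*
 * Sketch of the resulting link (plane choice illustrative): if
 * PLANE_PRIMARY on this CRTC is in crtc_state->nv12_planes and
 * PLANE_SPRITE5 is Y-capable and unused, the loop above picks SPRITE5
 * as the Y plane, marks it a planar slave and mirrors the primary's
 * ctl/color_ctl/view into it, with PLANE_CTL_YUV420_Y_PLANE set so the
 * hardware fetches only the Y surface through it.
 */
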
static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}

static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					pipe_mode->crtc_clock);

	return min(linetime_wm, 0x1ff);
}

static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
			       const struct intel_cdclk_state *cdclk_state)
{
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					cdclk_state->logical.cdclk);

	return min(linetime_wm, 0x1ff);
}

static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
				   crtc_state->pixel_rate);

	/* Display WA #1135: BXT:ALL GLK:ALL */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    skl_watermark_ipc_enabled(dev_priv))
		linetime_wm /= 2;

	return min(linetime_wm, 0x1ff);
}

static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_cdclk_state *cdclk_state;

	if (DISPLAY_VER(dev_priv) >= 9)
		crtc_state->linetime = skl_linetime_wm(crtc_state);
	else
		crtc_state->linetime = hsw_linetime_wm(crtc_state);

	if (!hsw_crtc_supports_ips(crtc))
		return 0;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
						       cdclk_state);

	return 0;
}

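/*
 * Rough numbers for intuition (illustrative only): the linetime
 * watermarks above are expressed in 1/8 us units, hence the "* 8".
 * For a 1080p CEA mode with crtc_htotal = 2200 and crtc_clock =
 * 148500 kHz, one line takes 2200 / 148500 kHz ~= 14.8 us, and
 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119, comfortably under
 * the 0x1ff register clamp.
 */
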
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    intel_crtc_needs_modeset(crtc_state) &&
	    !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	if (intel_crtc_needs_modeset(crtc_state)) {
		ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (intel_crtc_needs_color_update(crtc_state)) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = intel_compute_pipe_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Target pipe watermarks are invalid\n");
		return ret;
	}

	/*
	 * Calculate 'intermediate' watermarks that satisfy both the
	 * old state and the new state. We can program these
	 * immediately.
	 */
	ret = intel_compute_intermediate_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "No valid intermediate pipe watermarks are possible\n");
		return ret;
	}

	if (DISPLAY_VER(dev_priv) >= 9) {
		if (intel_crtc_needs_modeset(crtc_state) ||
		    intel_crtc_needs_fastset(crtc_state)) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_ips_compute_config(state, crtc);
		if (ret)
			return ret;
	}

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;
	}

	ret = intel_psr2_sel_fetch_update(state, crtc);
	if (ret)
		return ret;

	return 0;
}

static int
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
		      struct intel_crtc_state *crtc_state)
{
	struct drm_connector *connector = conn_state->connector;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	const struct drm_display_info *info = &connector->display_info;
	int bpp;

	switch (conn_state->max_bpc) {
	case 6 ... 7:
		bpp = 6 * 3;
		break;
	case 8 ... 9:
		bpp = 8 * 3;
		break;
	case 10 ... 11:
		bpp = 10 * 3;
		break;
	case 12 ... 16:
		bpp = 12 * 3;
		break;
	default:
		MISSING_CASE(conn_state->max_bpc);
		return -EINVAL;
	}

	if (bpp < crtc_state->pipe_bpp) {
		drm_dbg_kms(&i915->drm,
			    "[CONNECTOR:%d:%s] Limiting display bpp to %d "
			    "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
			    connector->base.id, connector->name,
			    bpp, 3 * info->bpc,
			    3 * conn_state->max_requested_bpc,
			    crtc_state->pipe_bpp);

		crtc_state->pipe_bpp = bpp;
	}

	return 0;
}

static int
compute_baseline_pipe_bpp(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	     IS_CHERRYVIEW(dev_priv)))
		bpp = 10 * 3;
	else if (DISPLAY_VER(dev_priv) >= 5)
		bpp = 12 * 3;
	else
		bpp = 8 * 3;

	crtc_state->pipe_bpp = bpp;

	/* Clamp display bpp to connector max bpp */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		int ret;

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = compute_sink_pipe_bpp(connector_state, crtc_state);
		if (ret)
			return ret;
	}

	return 0;
}

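/*
 * Example of the clamping flow (values illustrative): on an ILK+
 * platform the baseline above is 12 * 3 = 36. A connector whose state
 * reports max_bpc = 8 makes compute_sink_pipe_bpp() pick 8 * 3 = 24,
 * which is below 36, so pipe_bpp is clamped to 24 for the whole pipe
 * and the limiting connector is logged.
 */
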
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
				break;
			fallthrough;
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}

static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
					   struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));

	drm_property_replace_blob(&crtc_state->hw.degamma_lut,
				  crtc_state->uapi.degamma_lut);
	drm_property_replace_blob(&crtc_state->hw.gamma_lut,
				  crtc_state->uapi.gamma_lut);
	drm_property_replace_blob(&crtc_state->hw.ctm,
				  crtc_state->uapi.ctm);
}

static void
intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));

	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	drm_mode_copy(&crtc_state->hw.mode,
		      &crtc_state->uapi.mode);
	drm_mode_copy(&crtc_state->hw.adjusted_mode,
		      &crtc_state->uapi.adjusted_mode);
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
}

static void
copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
				    struct intel_crtc *slave_crtc)
{
	struct intel_crtc_state *slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
	const struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);

	drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut,
				  master_crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut,
				  master_crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&slave_crtc_state->hw.ctm,
				  master_crtc_state->hw.ctm);

	slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed;
}

static int
copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
				  struct intel_crtc *slave_crtc)
{
	struct intel_crtc_state *slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
	const struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);
	struct intel_crtc_state *saved_state;

	WARN_ON(master_crtc_state->bigjoiner_pipes !=
		slave_crtc_state->bigjoiner_pipes);

	saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* preserve some things from the slave's original crtc state */
	saved_state->uapi = slave_crtc_state->uapi;
	saved_state->scaler_state = slave_crtc_state->scaler_state;
	saved_state->shared_dpll = slave_crtc_state->shared_dpll;
	saved_state->crc_enabled = slave_crtc_state->crc_enabled;

	intel_crtc_free_hw_state(slave_crtc_state);
	if (slave_crtc_state->dp_tunnel_ref.tunnel)
		drm_dp_tunnel_ref_put(&slave_crtc_state->dp_tunnel_ref);
	memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
	slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
	slave_crtc_state->hw.active = master_crtc_state->hw.active;
	drm_mode_copy(&slave_crtc_state->hw.mode,
		      &master_crtc_state->hw.mode);
	drm_mode_copy(&slave_crtc_state->hw.pipe_mode,
		      &master_crtc_state->hw.pipe_mode);
	drm_mode_copy(&slave_crtc_state->hw.adjusted_mode,
		      &master_crtc_state->hw.adjusted_mode);
	slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;

	if (master_crtc_state->dp_tunnel_ref.tunnel)
		drm_dp_tunnel_ref_get(master_crtc_state->dp_tunnel_ref.tunnel,
				      &slave_crtc_state->dp_tunnel_ref);

	copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);

	slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
	slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
	slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;

	WARN_ON(master_crtc_state->bigjoiner_pipes !=
		slave_crtc_state->bigjoiner_pipes);

	return 0;
}

static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->inherited = crtc_state->inherited;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);

	return 0;
}

static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  const struct intel_link_bw_limits *limits)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int pipe_src_w, pipe_src_h;
	int base_bpp, ret, i;

	crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;

	crtc_state->framestart_delay = 1;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive nor negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(state, crtc);
	if (ret)
		return ret;

	crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
	crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];

	if (crtc_state->pipe_bpp > to_bpp_int(crtc_state->max_link_bpp_x16)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Link bpp limited to " BPP_X16_FMT "\n",
			    crtc->base.base.id, crtc->base.name,
			    BPP_X16_ARGS(crtc_state->max_link_bpp_x16));
		crtc_state->bw_constrained = true;
	}

	base_bpp = crtc_state->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&crtc_state->hw.mode,
			       &pipe_src_w, &pipe_src_h);
	drm_rect_init(&crtc_state->pipe_src, 0, 0,
		      pipe_src_w, pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		if (!check_single_encoder_cloning(state, crtc, encoder)) {
			drm_dbg_kms(&i915->drm,
				    "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
				    encoder->base.base.id, encoder->base.name);
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			crtc_state->output_types |=
				BIT(encoder->compute_output_type(encoder, crtc_state,
								 connector_state));
		else
			crtc_state->output_types |= BIT(encoder->type);
	}

	/* Ensure the port clock defaults are reset when retrying. */
	crtc_state->port_clock = 0;
	crtc_state->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/*
	 * Pass our mode to the connectors and the CRTC to give them a chance
	 * to adjust it according to limitations or connector properties, and
	 * also a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = encoder->compute_config(encoder, crtc_state,
					      connector_state);
		if (ret == -EDEADLK)
			return ret;
		if (ret < 0) {
			drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
				    encoder->base.base.id, encoder->base.name, ret);
			return ret;
		}
	}

	/*
	 * Set default port clock if not overwritten by the encoder. Needs to
	 * be done afterwards in case the encoder adjusts the mode.
	 */
	if (!crtc_state->port_clock)
		crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
			* crtc_state->pixel_multiplier;

	ret = intel_crtc_compute_config(state, crtc);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
			    crtc->base.base.id, crtc->base.name, ret);
		return ret;
	}

	/*
	 * Dithering seems to not pass through bits correctly when it should,
	 * so only enable it on 6 bpc panels and when it's not a compliance
	 * test requesting a 6 bpc video pattern.
	 */
	crtc_state->dither = (crtc_state->pipe_bpp == 6 * 3) &&
		!crtc_state->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    crtc->base.base.id, crtc->base.name,
		    base_bpp, crtc_state->pipe_bpp, crtc_state->dither);

	return 0;
}

static int
intel_modeset_pipe_config_late(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	int i;

	intel_bigjoiner_adjust_pipe_src(crtc_state);

	for_each_new_connector_in_state(&state->base, connector,
					conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);
		int ret;

		if (conn_state->crtc != &crtc->base ||
		    !encoder->compute_config_late)
			continue;

		ret = encoder->compute_config_late(encoder, crtc_state,
						   conn_state);
		if (ret)
			return ret;
	}

	return 0;
}

bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
		return true;

	return false;
}

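/*
 * The check above passes when the difference is under 5% of
 * clock1 + clock2, i.e. roughly 10% of their average. Illustrative
 * numbers: clock1 = 100000, clock2 = 104000 gives
 * (4000 + 204000) * 100 / 204000 = 101 < 105, a match, while
 * clock1 = 100000, clock2 = 120000 gives
 * (20000 + 220000) * 100 / 220000 = 109, no match.
 */
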
static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
		       const struct intel_link_m_n *m2_n2)
{
	return m_n->tu == m2_n2->tu &&
		m_n->data_m == m2_n2->data_m &&
		m_n->data_n == m2_n2->data_n &&
		m_n->link_m == m2_n2->link_m &&
		m_n->link_n == m2_n2->link_n;
}

static bool
intel_compare_infoframe(const union hdmi_infoframe *a,
			const union hdmi_infoframe *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

static bool
intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
			 const struct drm_dp_vsc_sdp *b)
{
	return a->pixelformat == b->pixelformat &&
		a->colorimetry == b->colorimetry &&
		a->bpc == b->bpc &&
		a->dynamic_range == b->dynamic_range &&
		a->content_type == b->content_type;
}

static bool
intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
{
	return memcmp(a, b, len) == 0;
}

static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		drm_dbg_kms(&dev_priv->drm,
			    "fastset requirement not met in %s infoframe\n", name);
		drm_dbg_kms(&dev_priv->drm, "expected:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		drm_dbg_kms(&dev_priv->drm, "found:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
		drm_err(&dev_priv->drm, "expected:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		drm_err(&dev_priv->drm, "found:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}

static void
pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *i915,
				bool fastset, const char *name,
				const struct drm_dp_vsc_sdp *a,
				const struct drm_dp_vsc_sdp *b)
{
	struct drm_printer p;

	if (fastset) {
		p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);

		drm_printf(&p, "fastset requirement not met in %s dp sdp\n", name);
	} else {
		p = drm_err_printer(&i915->drm, NULL);

		drm_printf(&p, "mismatch in %s dp sdp\n", name);
	}

	drm_printf(&p, "expected:\n");
	drm_dp_vsc_sdp_log(&p, a);
	drm_printf(&p, "found:\n");
	drm_dp_vsc_sdp_log(&p, b);
}

/* Returns the length up to and including the last differing byte */
static size_t
memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (a[i] != b[i])
			return i + 1;
	}

	return 0;
}

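/*
 * Example (illustrative): for a = {0x01, 0x02, 0x03, 0x04} and
 * b = {0x01, 0xff, 0x03, 0x04}, the scan from the end finds the last
 * difference at index 1 and returns 2, so the hex dumps below only
 * print the first two bytes instead of the whole buffer.
 */
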
static void
pipe_config_buffer_mismatch(bool fastset, const struct intel_crtc *crtc,
			    const char *name,
			    const u8 *a, const u8 *b, size_t len)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (fastset) {
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		/* only dump up to the last difference */
		len = memcmp_diff_len(a, b, len);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] fastset requirement not met in %s buffer\n",
			    crtc->base.base.id, crtc->base.name, name);
		print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
			       16, 0, a, len, false);
		print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
			       16, 0, b, len, false);
	} else {
		/* only dump up to the last difference */
		len = memcmp_diff_len(a, b, len);

		drm_err(&dev_priv->drm, "[CRTC:%d:%s] mismatch in %s buffer\n",
			crtc->base.base.id, crtc->base.name, name);
		print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
			       16, 0, a, len, false);
		print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
			       16, 0, b, len, false);
	}
}

static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}

static void
pipe_config_pll_mismatch(bool fastset,
			 const struct intel_crtc *crtc,
			 const char *name,
			 const struct intel_dpll_hw_state *a,
			 const struct intel_dpll_hw_state *b)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (fastset) {
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset requirement not met in %s\n",
			    crtc->base.base.id, crtc->base.name, name);
		drm_dbg_kms(&i915->drm, "expected:\n");
		intel_dpll_dump_hw_state(i915, a);
		drm_dbg_kms(&i915->drm, "found:\n");
		intel_dpll_dump_hw_state(i915, b);
	} else {
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s buffer\n",
			crtc->base.base.id, crtc->base.name, name);
		drm_err(&i915->drm, "expected:\n");
		intel_dpll_dump_hw_state(i915, a);
		drm_err(&i915->drm, "found:\n");
		intel_dpll_dump_hw_state(i915, b);
	}
}

bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state *pipe_config,
			  bool fastset)
{
	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	bool ret = true;

#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
				 __stringify(name) " is not bool"); \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %s, found %s)", \
				     str_yes_no(current_config->name), \
				     str_yes_no(pipe_config->name)); \
		ret = false; \
	} \
} while (0)
#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i data %i/%i link %i/%i, " \
				     "found tu %i, data %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.data_m, \
				     current_config->name.data_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.data_m, \
				     pipe_config->name.data_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_PLL(name) do { \
	if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \
					 &pipe_config->name)) { \
		pipe_config_pll_mismatch(fastset, crtc, __stringify(name), \
					 &current_config->name, \
					 &pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_TIMINGS(name) do { \
	PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
	PIPE_CONF_CHECK_I(name.crtc_htotal); \
	PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
	PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
	PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
	PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
	PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
	PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
	PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
	PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
	if (!fastset || !pipe_config->update_lrr) { \
		PIPE_CONF_CHECK_I(name.crtc_vtotal); \
		PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
	} \
} while (0)

#define PIPE_CONF_CHECK_RECT(name) do { \
	PIPE_CONF_CHECK_I(name.x1); \
	PIPE_CONF_CHECK_I(name.x2); \
	PIPE_CONF_CHECK_I(name.y1); \
	PIPE_CONF_CHECK_I(name.y2); \
} while (0)

#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
	if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
				      &pipe_config->infoframes.name)) { \
		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
						&current_config->infoframes.name, \
						&pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_BUFFER(name, len) do { \
	BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
	BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
	if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
		pipe_config_buffer_mismatch(fastset, crtc, __stringify(name), \
					    current_config->name, \
					    pipe_config->name, \
					    (len)); \
		ret = false; \
	} \
} while (0)
#define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \
	if (current_config->gamma_mode == pipe_config->gamma_mode && \
	    !intel_color_lut_equal(current_config, \
				   current_config->lut, pipe_config->lut, \
				   is_pre_csc_lut)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(lut), \
				     "hw_state doesn't match sw_state"); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_CSC(name) do { \
	PIPE_CONF_CHECK_X(name.preoff[0]); \
	PIPE_CONF_CHECK_X(name.preoff[1]); \
	PIPE_CONF_CHECK_X(name.preoff[2]); \
	PIPE_CONF_CHECK_X(name.coeff[0]); \
	PIPE_CONF_CHECK_X(name.coeff[1]); \
	PIPE_CONF_CHECK_X(name.coeff[2]); \
	PIPE_CONF_CHECK_X(name.coeff[3]); \
	PIPE_CONF_CHECK_X(name.coeff[4]); \
	PIPE_CONF_CHECK_X(name.coeff[5]); \
	PIPE_CONF_CHECK_X(name.coeff[6]); \
	PIPE_CONF_CHECK_X(name.coeff[7]); \
	PIPE_CONF_CHECK_X(name.coeff[8]); \
	PIPE_CONF_CHECK_X(name.postoff[0]); \
	PIPE_CONF_CHECK_X(name.postoff[1]); \
	PIPE_CONF_CHECK_X(name.postoff[2]); \
} while (0)

#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))
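/*
 * A hand-expanded sketch of what one of these checks becomes (modulo
 * exact formatting), for reference:
 *
 *	PIPE_CONF_CHECK_I(pixel_multiplier);
 *
 * expands to roughly:
 *
 *	if (current_config->pixel_multiplier != pipe_config->pixel_multiplier) {
 *		pipe_config_mismatch(fastset, crtc, "pixel_multiplier",
 *				     "(expected %i, found %i)", ...);
 *		ret = false;
 *	}
 *
 * i.e. each mismatch logs and clears ret without aborting the compare,
 * so a single pass reports every differing field.
 */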
	PIPE_CONF_CHECK_BOOL(hw.enable);
	PIPE_CONF_CHECK_BOOL(hw.active);

	PIPE_CONF_CHECK_I(cpu_transcoder);
	PIPE_CONF_CHECK_I(mst_master_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
		if (!fastset || !pipe_config->update_m_n)
			PIPE_CONF_CHECK_M_N(dp_m_n);
	} else {
		PIPE_CONF_CHECK_M_N(dp_m_n);
		PIPE_CONF_CHECK_M_N(dp_m2_n2);
	}

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(framestart_delay);
	PIPE_CONF_CHECK_I(msa_timing_delay);

	PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
	PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);

	PIPE_CONF_CHECK_I(pixel_multiplier);

	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	PIPE_CONF_CHECK_BOOL(enhanced_framing);
	PIPE_CONF_CHECK_BOOL(fec_enable);

	if (!fastset) {
		PIPE_CONF_CHECK_BOOL(has_audio);
		PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (DISPLAY_VER(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	if (!fastset) {
		PIPE_CONF_CHECK_RECT(pipe_src);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		PIPE_CONF_CHECK_RECT(pch_pfit.dst);

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_I(pixel_rate);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (IS_CHERRYVIEW(dev_priv))
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);
		PIPE_CONF_CHECK_BOOL(wgc_enable);

		PIPE_CONF_CHECK_I(linetime);
		PIPE_CONF_CHECK_I(ips_linetime);

		PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true);
		PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false);

		PIPE_CONF_CHECK_CSC(csc);
		PIPE_CONF_CHECK_CSC(output_csc);
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	if (dev_priv->display.dpll.mgr)
		PIPE_CONF_CHECK_P(shared_dpll);

	/* FIXME convert everything over the dpll_mgr */
	if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv))
		PIPE_CONF_CHECK_PLL(dpll_hw_state);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	if (!fastset || !pipe_config->update_m_n) {
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
	}
	PIPE_CONF_CHECK_I(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

	if (current_config->has_psr || pipe_config->has_psr)
		PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
					    ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
	else
		PIPE_CONF_CHECK_X(infoframes.enable);

	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	PIPE_CONF_CHECK_INFOFRAME(drm);
	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);

	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
	PIPE_CONF_CHECK_I(master_transcoder);
	PIPE_CONF_CHECK_X(bigjoiner_pipes);

	PIPE_CONF_CHECK_BOOL(dsc.config.block_pred_enable);
	PIPE_CONF_CHECK_BOOL(dsc.config.convert_rgb);
	PIPE_CONF_CHECK_BOOL(dsc.config.simple_422);
	PIPE_CONF_CHECK_BOOL(dsc.config.native_422);
	PIPE_CONF_CHECK_BOOL(dsc.config.native_420);
	PIPE_CONF_CHECK_BOOL(dsc.config.vbr_enable);
	PIPE_CONF_CHECK_I(dsc.config.line_buf_depth);
	PIPE_CONF_CHECK_I(dsc.config.bits_per_component);
	PIPE_CONF_CHECK_I(dsc.config.pic_width);
	PIPE_CONF_CHECK_I(dsc.config.pic_height);
	PIPE_CONF_CHECK_I(dsc.config.slice_width);
	PIPE_CONF_CHECK_I(dsc.config.slice_height);
	PIPE_CONF_CHECK_I(dsc.config.initial_dec_delay);
	PIPE_CONF_CHECK_I(dsc.config.initial_xmit_delay);
	PIPE_CONF_CHECK_I(dsc.config.scale_decrement_interval);
	PIPE_CONF_CHECK_I(dsc.config.scale_increment_interval);
	PIPE_CONF_CHECK_I(dsc.config.initial_scale_value);
	PIPE_CONF_CHECK_I(dsc.config.first_line_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.flatness_min_qp);
	PIPE_CONF_CHECK_I(dsc.config.flatness_max_qp);
	PIPE_CONF_CHECK_I(dsc.config.slice_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.nfl_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.initial_offset);
	PIPE_CONF_CHECK_I(dsc.config.final_offset);
PIPE_CONF_CHECK_I(dsc.config.rc_model_size); 5313 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit0); 5314 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit1); 5315 PIPE_CONF_CHECK_I(dsc.config.slice_chunk_size); 5316 PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset); 5317 PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset); 5318 5319 PIPE_CONF_CHECK_BOOL(dsc.compression_enable); 5320 PIPE_CONF_CHECK_BOOL(dsc.dsc_split); 5321 PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16); 5322 5323 PIPE_CONF_CHECK_BOOL(splitter.enable); 5324 PIPE_CONF_CHECK_I(splitter.link_count); 5325 PIPE_CONF_CHECK_I(splitter.pixel_overlap); 5326 5327 if (!fastset) { 5328 PIPE_CONF_CHECK_BOOL(vrr.enable); 5329 PIPE_CONF_CHECK_I(vrr.vmin); 5330 PIPE_CONF_CHECK_I(vrr.vmax); 5331 PIPE_CONF_CHECK_I(vrr.flipline); 5332 PIPE_CONF_CHECK_I(vrr.pipeline_full); 5333 PIPE_CONF_CHECK_I(vrr.guardband); 5334 } 5335 5336 #undef PIPE_CONF_CHECK_X 5337 #undef PIPE_CONF_CHECK_I 5338 #undef PIPE_CONF_CHECK_BOOL 5339 #undef PIPE_CONF_CHECK_P 5340 #undef PIPE_CONF_CHECK_FLAGS 5341 #undef PIPE_CONF_CHECK_COLOR_LUT 5342 #undef PIPE_CONF_CHECK_TIMINGS 5343 #undef PIPE_CONF_CHECK_RECT 5344 #undef PIPE_CONF_QUIRK 5345 5346 return ret; 5347 } 5348 5349 static void 5350 intel_verify_planes(struct intel_atomic_state *state) 5351 { 5352 struct intel_plane *plane; 5353 const struct intel_plane_state *plane_state; 5354 int i; 5355 5356 for_each_new_intel_plane_in_state(state, plane, 5357 plane_state, i) 5358 assert_plane(plane, plane_state->planar_slave || 5359 plane_state->uapi.visible); 5360 } 5361 5362 static int intel_modeset_pipe(struct intel_atomic_state *state, 5363 struct intel_crtc_state *crtc_state, 5364 const char *reason) 5365 { 5366 struct drm_i915_private *i915 = to_i915(state->base.dev); 5367 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5368 int ret; 5369 5370 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Full modeset due to %s\n", 5371 crtc->base.base.id, crtc->base.name, reason); 5372 5373 ret = drm_atomic_add_affected_connectors(&state->base, 5374 &crtc->base); 5375 if (ret) 5376 return ret; 5377 5378 ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc); 5379 if (ret) 5380 return ret; 5381 5382 ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc); 5383 if (ret) 5384 return ret; 5385 5386 ret = intel_atomic_add_affected_planes(state, crtc); 5387 if (ret) 5388 return ret; 5389 5390 crtc_state->uapi.mode_changed = true; 5391 5392 return 0; 5393 } 5394 5395 /** 5396 * intel_modeset_pipes_in_mask_early - force a full modeset on a set of pipes 5397 * @state: intel atomic state 5398 * @reason: the reason for the full modeset 5399 * @mask: mask of pipes to modeset 5400 * 5401 * Add pipes in @mask to @state and force a full modeset on the enabled ones 5402 * due to the description in @reason. 5403 * This function can be called only before new plane states are computed. 5404 * 5405 * Returns 0 in case of success, negative error code otherwise. 
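 *
 * A minimal usage sketch (illustrative only; the reason string and the
 * pipe mask below are made up for this example):
 *
 *	ret = intel_modeset_pipes_in_mask_early(state, "example reason",
 *						BIT(PIPE_A) | BIT(PIPE_B));
 *	if (ret)
 *		return ret;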
5406 */ 5407 int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state, 5408 const char *reason, u8 mask) 5409 { 5410 struct drm_i915_private *i915 = to_i915(state->base.dev); 5411 struct intel_crtc *crtc; 5412 5413 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mask) { 5414 struct intel_crtc_state *crtc_state; 5415 int ret; 5416 5417 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 5418 if (IS_ERR(crtc_state)) 5419 return PTR_ERR(crtc_state); 5420 5421 if (!crtc_state->hw.enable || 5422 intel_crtc_needs_modeset(crtc_state)) 5423 continue; 5424 5425 ret = intel_modeset_pipe(state, crtc_state, reason); 5426 if (ret) 5427 return ret; 5428 } 5429 5430 return 0; 5431 } 5432 5433 static void 5434 intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state) 5435 { 5436 crtc_state->uapi.mode_changed = true; 5437 5438 crtc_state->update_pipe = false; 5439 crtc_state->update_m_n = false; 5440 crtc_state->update_lrr = false; 5441 } 5442 5443 /** 5444 * intel_modeset_all_pipes_late - force a full modeset on all pipes 5445 * @state: intel atomic state 5446 * @reason: the reason for the full modeset 5447 * 5448 * Add all pipes to @state and force a full modeset on the active ones due to 5449 * the description in @reason. 5450 * This function can be called only after the new plane states have been computed. 5451 * 5452 * Returns 0 in case of success, negative error code otherwise. 5453 */ 5454 int intel_modeset_all_pipes_late(struct intel_atomic_state *state, 5455 const char *reason) 5456 { 5457 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5458 struct intel_crtc *crtc; 5459 5460 for_each_intel_crtc(&dev_priv->drm, crtc) { 5461 struct intel_crtc_state *crtc_state; 5462 int ret; 5463 5464 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 5465 if (IS_ERR(crtc_state)) 5466 return PTR_ERR(crtc_state); 5467 5468 if (!crtc_state->hw.active || 5469 intel_crtc_needs_modeset(crtc_state)) 5470 continue; 5471 5472 ret = intel_modeset_pipe(state, crtc_state, reason); 5473 if (ret) 5474 return ret; 5475 5476 intel_crtc_flag_modeset(crtc_state); 5477 5478 crtc_state->update_planes |= crtc_state->active_planes; 5479 crtc_state->async_flip_planes = 0; 5480 crtc_state->do_async_flip = false; 5481 } 5482 5483 return 0; 5484 } 5485 5486 /* 5487 * This implements the workaround described in the "notes" section of the mode 5488 * set sequence documentation. When going from no pipes or single pipe to 5489 * multiple pipes, and planes are enabled after the pipe, we need to wait at 5490 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 5491 */ 5492 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) 5493 { 5494 struct intel_crtc_state *crtc_state; 5495 struct intel_crtc *crtc; 5496 struct intel_crtc_state *first_crtc_state = NULL; 5497 struct intel_crtc_state *other_crtc_state = NULL; 5498 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 5499 int i; 5500 5501 /* look at all crtc's that are going to be enabled during the modeset */ 5502 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5503 if (!crtc_state->hw.active || 5504 !intel_crtc_needs_modeset(crtc_state)) 5505 continue; 5506 5507 if (first_crtc_state) { 5508 other_crtc_state = crtc_state; 5509 break; 5510 } else { 5511 first_crtc_state = crtc_state; 5512 first_pipe = crtc->pipe; 5513 } 5514 } 5515 5516 /* No workaround needed?
*/ 5517 if (!first_crtc_state) 5518 return 0; 5519 5520 /* w/a possibly needed, check how many crtc's are already enabled. */ 5521 for_each_intel_crtc(state->base.dev, crtc) { 5522 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 5523 if (IS_ERR(crtc_state)) 5524 return PTR_ERR(crtc_state); 5525 5526 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 5527 5528 if (!crtc_state->hw.active || 5529 intel_crtc_needs_modeset(crtc_state)) 5530 continue; 5531 5532 /* 2 or more enabled crtcs means no need for w/a */ 5533 if (enabled_pipe != INVALID_PIPE) 5534 return 0; 5535 5536 enabled_pipe = crtc->pipe; 5537 } 5538 5539 if (enabled_pipe != INVALID_PIPE) 5540 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 5541 else if (other_crtc_state) 5542 other_crtc_state->hsw_workaround_pipe = first_pipe; 5543 5544 return 0; 5545 } 5546 5547 u8 intel_calc_active_pipes(struct intel_atomic_state *state, 5548 u8 active_pipes) 5549 { 5550 const struct intel_crtc_state *crtc_state; 5551 struct intel_crtc *crtc; 5552 int i; 5553 5554 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5555 if (crtc_state->hw.active) 5556 active_pipes |= BIT(crtc->pipe); 5557 else 5558 active_pipes &= ~BIT(crtc->pipe); 5559 } 5560 5561 return active_pipes; 5562 } 5563 5564 static int intel_modeset_checks(struct intel_atomic_state *state) 5565 { 5566 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5567 5568 state->modeset = true; 5569 5570 if (IS_HASWELL(dev_priv)) 5571 return hsw_mode_set_planes_workaround(state); 5572 5573 return 0; 5574 } 5575 5576 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 5577 struct intel_crtc_state *new_crtc_state) 5578 { 5579 struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev); 5580 5581 /* only allow LRR when the timings stay within the VRR range */ 5582 if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range) 5583 new_crtc_state->update_lrr = false; 5584 5585 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 5586 drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n"); 5587 else 5588 new_crtc_state->uapi.mode_changed = false; 5589 5590 if (intel_compare_link_m_n(&old_crtc_state->dp_m_n, 5591 &new_crtc_state->dp_m_n)) 5592 new_crtc_state->update_m_n = false; 5593 5594 if ((old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal && 5595 old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end)) 5596 new_crtc_state->update_lrr = false; 5597 5598 if (intel_crtc_needs_modeset(new_crtc_state)) 5599 intel_crtc_flag_modeset(new_crtc_state); 5600 else 5601 new_crtc_state->update_pipe = true; 5602 } 5603 5604 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, 5605 struct intel_crtc *crtc, 5606 u8 plane_ids_mask) 5607 { 5608 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5609 struct intel_plane *plane; 5610 5611 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 5612 struct intel_plane_state *plane_state; 5613 5614 if ((plane_ids_mask & BIT(plane->id)) == 0) 5615 continue; 5616 5617 plane_state = intel_atomic_get_plane_state(state, plane); 5618 if (IS_ERR(plane_state)) 5619 return PTR_ERR(plane_state); 5620 } 5621 5622 return 0; 5623 } 5624 5625 int intel_atomic_add_affected_planes(struct intel_atomic_state *state, 5626 struct intel_crtc *crtc) 5627 { 5628 const struct intel_crtc_state *old_crtc_state = 5629 
intel_atomic_get_old_crtc_state(state, crtc); 5630 const struct intel_crtc_state *new_crtc_state = 5631 intel_atomic_get_new_crtc_state(state, crtc); 5632 5633 return intel_crtc_add_planes_to_state(state, crtc, 5634 old_crtc_state->enabled_planes | 5635 new_crtc_state->enabled_planes); 5636 } 5637 5638 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) 5639 { 5640 /* See {hsw,vlv,ivb}_plane_ratio() */ 5641 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || 5642 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || 5643 IS_IVYBRIDGE(dev_priv); 5644 } 5645 5646 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state, 5647 struct intel_crtc *crtc, 5648 struct intel_crtc *other) 5649 { 5650 const struct intel_plane_state __maybe_unused *plane_state; 5651 struct intel_plane *plane; 5652 u8 plane_ids = 0; 5653 int i; 5654 5655 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 5656 if (plane->pipe == crtc->pipe) 5657 plane_ids |= BIT(plane->id); 5658 } 5659 5660 return intel_crtc_add_planes_to_state(state, other, plane_ids); 5661 } 5662 5663 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state) 5664 { 5665 struct drm_i915_private *i915 = to_i915(state->base.dev); 5666 const struct intel_crtc_state *crtc_state; 5667 struct intel_crtc *crtc; 5668 int i; 5669 5670 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5671 struct intel_crtc *other; 5672 5673 for_each_intel_crtc_in_pipe_mask(&i915->drm, other, 5674 crtc_state->bigjoiner_pipes) { 5675 int ret; 5676 5677 if (crtc == other) 5678 continue; 5679 5680 ret = intel_crtc_add_bigjoiner_planes(state, crtc, other); 5681 if (ret) 5682 return ret; 5683 } 5684 } 5685 5686 return 0; 5687 } 5688 5689 static int intel_atomic_check_planes(struct intel_atomic_state *state) 5690 { 5691 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5692 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 5693 struct intel_plane_state __maybe_unused *plane_state; 5694 struct intel_plane *plane; 5695 struct intel_crtc *crtc; 5696 int i, ret; 5697 5698 ret = icl_add_linked_planes(state); 5699 if (ret) 5700 return ret; 5701 5702 ret = intel_bigjoiner_add_affected_planes(state); 5703 if (ret) 5704 return ret; 5705 5706 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 5707 ret = intel_plane_atomic_check(state, plane); 5708 if (ret) { 5709 drm_dbg_atomic(&dev_priv->drm, 5710 "[PLANE:%d:%s] atomic driver check failed\n", 5711 plane->base.base.id, plane->base.name); 5712 return ret; 5713 } 5714 } 5715 5716 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 5717 new_crtc_state, i) { 5718 u8 old_active_planes, new_active_planes; 5719 5720 ret = icl_check_nv12_planes(new_crtc_state); 5721 if (ret) 5722 return ret; 5723 5724 /* 5725 * On some platforms the number of active planes affects 5726 * the planes' minimum cdclk calculation. Add such planes 5727 * to the state before we compute the minimum cdclk. 
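 * For example (illustrative), on the platforms covered by
 * active_planes_affects_min_cdclk() the {hsw,vlv,ivb}_plane_ratio()
 * helpers scale the plane's minimum cdclk with the number of active
 * planes, so going from two to three enabled planes can raise the
 * required cdclk even though the mode itself is unchanged.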
5728 */ 5729 if (!active_planes_affects_min_cdclk(dev_priv)) 5730 continue; 5731 5732 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 5733 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 5734 5735 if (hweight8(old_active_planes) == hweight8(new_active_planes)) 5736 continue; 5737 5738 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); 5739 if (ret) 5740 return ret; 5741 } 5742 5743 return 0; 5744 } 5745 5746 static int intel_atomic_check_crtcs(struct intel_atomic_state *state) 5747 { 5748 struct intel_crtc_state __maybe_unused *crtc_state; 5749 struct intel_crtc *crtc; 5750 int i; 5751 5752 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5753 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 5754 int ret; 5755 5756 ret = intel_crtc_atomic_check(state, crtc); 5757 if (ret) { 5758 drm_dbg_atomic(&i915->drm, 5759 "[CRTC:%d:%s] atomic driver check failed\n", 5760 crtc->base.base.id, crtc->base.name); 5761 return ret; 5762 } 5763 } 5764 5765 return 0; 5766 } 5767 5768 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, 5769 u8 transcoders) 5770 { 5771 const struct intel_crtc_state *new_crtc_state; 5772 struct intel_crtc *crtc; 5773 int i; 5774 5775 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 5776 if (new_crtc_state->hw.enable && 5777 transcoders & BIT(new_crtc_state->cpu_transcoder) && 5778 intel_crtc_needs_modeset(new_crtc_state)) 5779 return true; 5780 } 5781 5782 return false; 5783 } 5784 5785 static bool intel_pipes_need_modeset(struct intel_atomic_state *state, 5786 u8 pipes) 5787 { 5788 const struct intel_crtc_state *new_crtc_state; 5789 struct intel_crtc *crtc; 5790 int i; 5791 5792 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 5793 if (new_crtc_state->hw.enable && 5794 pipes & BIT(crtc->pipe) && 5795 intel_crtc_needs_modeset(new_crtc_state)) 5796 return true; 5797 } 5798 5799 return false; 5800 } 5801 5802 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, 5803 struct intel_crtc *master_crtc) 5804 { 5805 struct drm_i915_private *i915 = to_i915(state->base.dev); 5806 struct intel_crtc_state *master_crtc_state = 5807 intel_atomic_get_new_crtc_state(state, master_crtc); 5808 struct intel_crtc *slave_crtc; 5809 5810 if (!master_crtc_state->bigjoiner_pipes) 5811 return 0; 5812 5813 /* sanity check */ 5814 if (drm_WARN_ON(&i915->drm, 5815 master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state))) 5816 return -EINVAL; 5817 5818 if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) { 5819 drm_dbg_kms(&i915->drm, 5820 "[CRTC:%d:%s] Cannot act as big joiner master " 5821 "(need 0x%x as pipes, only 0x%x possible)\n", 5822 master_crtc->base.base.id, master_crtc->base.name, 5823 master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915)); 5824 return -EINVAL; 5825 } 5826 5827 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, 5828 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) { 5829 struct intel_crtc_state *slave_crtc_state; 5830 int ret; 5831 5832 slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc); 5833 if (IS_ERR(slave_crtc_state)) 5834 return PTR_ERR(slave_crtc_state); 5835 5836 /* master being enabled, slave was already configured? 
*/ 5837 if (slave_crtc_state->uapi.enable) { 5838 drm_dbg_kms(&i915->drm, 5839 "[CRTC:%d:%s] Slave is enabled as normal CRTC, but " 5840 "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n", 5841 slave_crtc->base.base.id, slave_crtc->base.name, 5842 master_crtc->base.base.id, master_crtc->base.name); 5843 return -EINVAL; 5844 } 5845 5846 /* 5847 * The state copy logic assumes the master crtc gets processed 5848 * before the slave crtc during the main compute_config loop. 5849 * This works because the crtcs are created in pipe order, 5850 * and the hardware requires master pipe < slave pipe as well. 5851 * Should that change we need to rethink the logic. 5852 */ 5853 if (WARN_ON(drm_crtc_index(&master_crtc->base) > 5854 drm_crtc_index(&slave_crtc->base))) 5855 return -EINVAL; 5856 5857 drm_dbg_kms(&i915->drm, 5858 "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n", 5859 slave_crtc->base.base.id, slave_crtc->base.name, 5860 master_crtc->base.base.id, master_crtc->base.name); 5861 5862 slave_crtc_state->bigjoiner_pipes = 5863 master_crtc_state->bigjoiner_pipes; 5864 5865 ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc); 5866 if (ret) 5867 return ret; 5868 } 5869 5870 return 0; 5871 } 5872 5873 static void kill_bigjoiner_slave(struct intel_atomic_state *state, 5874 struct intel_crtc *master_crtc) 5875 { 5876 struct drm_i915_private *i915 = to_i915(state->base.dev); 5877 struct intel_crtc_state *master_crtc_state = 5878 intel_atomic_get_new_crtc_state(state, master_crtc); 5879 struct intel_crtc *slave_crtc; 5880 5881 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, 5882 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) { 5883 struct intel_crtc_state *slave_crtc_state = 5884 intel_atomic_get_new_crtc_state(state, slave_crtc); 5885 5886 slave_crtc_state->bigjoiner_pipes = 0; 5887 5888 intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc); 5889 } 5890 5891 master_crtc_state->bigjoiner_pipes = 0; 5892 } 5893 5894 /** 5895 * DOC: asynchronous flip implementation 5896 * 5897 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC 5898 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL. 5899 * Correspondingly, support is currently added only for the primary plane. 5900 * 5901 * Async flip can only change the plane surface address, so anything else 5902 * changing is rejected from the intel_async_flip_check_hw() function. 5903 * Once this check is cleared, flip done interrupt is enabled using 5904 * the intel_crtc_enable_flip_done() function. 5905 * 5906 * As soon as the surface address register is written, flip done interrupt is 5907 * generated and the requested events are sent to userspace in the interrupt 5908 * handler itself. The timestamp and sequence sent during the flip done event 5909 * correspond to the last vblank and have no relation to the actual time when 5910 * the flip done event was sent.
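 *
 * A minimal userspace sketch (illustrative only; assumes fd, crtc_id and
 * fb_id were obtained through the usual libdrm setup):
 *
 *	ret = drmModePageFlip(fd, crtc_id, fb_id,
 *			      DRM_MODE_PAGE_FLIP_ASYNC |
 *			      DRM_MODE_PAGE_FLIP_EVENT, user_data);
 *
 * The flip done event is then delivered on fd and can be drained with
 * drmHandleEvent().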
5911 */ 5912 static int intel_async_flip_check_uapi(struct intel_atomic_state *state, 5913 struct intel_crtc *crtc) 5914 { 5915 struct drm_i915_private *i915 = to_i915(state->base.dev); 5916 const struct intel_crtc_state *new_crtc_state = 5917 intel_atomic_get_new_crtc_state(state, crtc); 5918 const struct intel_plane_state *old_plane_state; 5919 struct intel_plane_state *new_plane_state; 5920 struct intel_plane *plane; 5921 int i; 5922 5923 if (!new_crtc_state->uapi.async_flip) 5924 return 0; 5925 5926 if (!new_crtc_state->uapi.active) { 5927 drm_dbg_kms(&i915->drm, 5928 "[CRTC:%d:%s] not active\n", 5929 crtc->base.base.id, crtc->base.name); 5930 return -EINVAL; 5931 } 5932 5933 if (intel_crtc_needs_modeset(new_crtc_state)) { 5934 drm_dbg_kms(&i915->drm, 5935 "[CRTC:%d:%s] modeset required\n", 5936 crtc->base.base.id, crtc->base.name); 5937 return -EINVAL; 5938 } 5939 5940 /* 5941 * FIXME: Bigjoiner+async flip is busted currently. 5942 * Remove this check once the issues are fixed. 5943 */ 5944 if (new_crtc_state->bigjoiner_pipes) { 5945 drm_dbg_kms(&i915->drm, 5946 "[CRTC:%d:%s] async flip disallowed with bigjoiner\n", 5947 crtc->base.base.id, crtc->base.name); 5948 return -EINVAL; 5949 } 5950 5951 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 5952 new_plane_state, i) { 5953 if (plane->pipe != crtc->pipe) 5954 continue; 5955 5956 /* 5957 * TODO: Async flip is only supported through the page flip IOCTL 5958 * as of now, so support is currently added for the primary plane only. 5959 * Support for other planes on platforms that support it 5960 * (vlv/chv and icl+) should be added when async flip is 5961 * enabled in the atomic IOCTL path. 5962 */ 5963 if (!plane->async_flip) { 5964 drm_dbg_kms(&i915->drm, 5965 "[PLANE:%d:%s] async flip not supported\n", 5966 plane->base.base.id, plane->base.name); 5967 return -EINVAL; 5968 } 5969 5970 if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) { 5971 drm_dbg_kms(&i915->drm, 5972 "[PLANE:%d:%s] no old or new framebuffer\n", 5973 plane->base.base.id, plane->base.name); 5974 return -EINVAL; 5975 } 5976 } 5977 5978 return 0; 5979 } 5980 5981 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc) 5982 { 5983 struct drm_i915_private *i915 = to_i915(state->base.dev); 5984 const struct intel_crtc_state *old_crtc_state, *new_crtc_state; 5985 const struct intel_plane_state *new_plane_state, *old_plane_state; 5986 struct intel_plane *plane; 5987 int i; 5988 5989 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 5990 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 5991 5992 if (!new_crtc_state->uapi.async_flip) 5993 return 0; 5994 5995 if (!new_crtc_state->hw.active) { 5996 drm_dbg_kms(&i915->drm, 5997 "[CRTC:%d:%s] not active\n", 5998 crtc->base.base.id, crtc->base.name); 5999 return -EINVAL; 6000 } 6001 6002 if (intel_crtc_needs_modeset(new_crtc_state)) { 6003 drm_dbg_kms(&i915->drm, 6004 "[CRTC:%d:%s] modeset required\n", 6005 crtc->base.base.id, crtc->base.name); 6006 return -EINVAL; 6007 } 6008 6009 if (old_crtc_state->active_planes != new_crtc_state->active_planes) { 6010 drm_dbg_kms(&i915->drm, 6011 "[CRTC:%d:%s] Active planes cannot be changed in async flip\n", 6012 crtc->base.base.id, crtc->base.name); 6013 return -EINVAL; 6014 } 6015 6016 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 6017 new_plane_state, i) { 6018 if (plane->pipe != crtc->pipe) 6019 continue; 6020 6021 /* 6022 * Only async flip capable planes should be in the state 6023 *
if we're really about to ask the hardware to perform 6024 * an async flip. We should never get this far otherwise. 6025 */ 6026 if (drm_WARN_ON(&i915->drm, 6027 new_crtc_state->do_async_flip && !plane->async_flip)) 6028 return -EINVAL; 6029 6030 /* 6031 * Only check async flip capable planes; other planes 6032 * may be involved in the initial commit due to 6033 * the wm0/ddb optimization. 6034 * 6035 * TODO: maybe we should track which planes were actually 6036 * requested to do the async flip... 6037 */ 6038 if (!plane->async_flip) 6039 continue; 6040 6041 /* 6042 * FIXME: This check is kept generic for all platforms. 6043 * Need to verify this for all gen9 platforms to enable 6044 * this selectively if required. 6045 */ 6046 switch (new_plane_state->hw.fb->modifier) { 6047 case DRM_FORMAT_MOD_LINEAR: 6048 /* 6049 * FIXME: Async flip on linear buffers is supported on ICL, 6050 * but additional alignment and FBC restrictions 6051 * need to be taken care of. These aren't applicable for 6052 * gen12+. 6053 */ 6054 if (DISPLAY_VER(i915) < 12) { 6055 drm_dbg_kms(&i915->drm, 6056 "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n", 6057 plane->base.base.id, plane->base.name, 6058 new_plane_state->hw.fb->modifier, DISPLAY_VER(i915)); 6059 return -EINVAL; 6060 } 6061 break; 6062 6063 case I915_FORMAT_MOD_X_TILED: 6064 case I915_FORMAT_MOD_Y_TILED: 6065 case I915_FORMAT_MOD_Yf_TILED: 6066 case I915_FORMAT_MOD_4_TILED: 6067 break; 6068 default: 6069 drm_dbg_kms(&i915->drm, 6070 "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n", 6071 plane->base.base.id, plane->base.name, 6072 new_plane_state->hw.fb->modifier); 6073 return -EINVAL; 6074 } 6075 6076 if (new_plane_state->hw.fb->format->num_planes > 1) { 6077 drm_dbg_kms(&i915->drm, 6078 "[PLANE:%d:%s] Planar formats do not support async flips\n", 6079 plane->base.base.id, plane->base.name); 6080 return -EINVAL; 6081 } 6082 6083 if (old_plane_state->view.color_plane[0].mapping_stride != 6084 new_plane_state->view.color_plane[0].mapping_stride) { 6085 drm_dbg_kms(&i915->drm, 6086 "[PLANE:%d:%s] Stride cannot be changed in async flip\n", 6087 plane->base.base.id, plane->base.name); 6088 return -EINVAL; 6089 } 6090 6091 if (old_plane_state->hw.fb->modifier != 6092 new_plane_state->hw.fb->modifier) { 6093 drm_dbg_kms(&i915->drm, 6094 "[PLANE:%d:%s] Modifier cannot be changed in async flip\n", 6095 plane->base.base.id, plane->base.name); 6096 return -EINVAL; 6097 } 6098 6099 if (old_plane_state->hw.fb->format != 6100 new_plane_state->hw.fb->format) { 6101 drm_dbg_kms(&i915->drm, 6102 "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n", 6103 plane->base.base.id, plane->base.name); 6104 return -EINVAL; 6105 } 6106 6107 if (old_plane_state->hw.rotation != 6108 new_plane_state->hw.rotation) { 6109 drm_dbg_kms(&i915->drm, 6110 "[PLANE:%d:%s] Rotation cannot be changed in async flip\n", 6111 plane->base.base.id, plane->base.name); 6112 return -EINVAL; 6113 } 6114 6115 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) || 6116 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) { 6117 drm_dbg_kms(&i915->drm, 6118 "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n", 6119 plane->base.base.id, plane->base.name); 6120 return -EINVAL; 6121 } 6122 6123 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) { 6124 drm_dbg_kms(&i915->drm, 6125 "[PLANE:%d:%s] Alpha value cannot be changed in async flip\n", 6126 plane->base.base.id, plane->base.name); 6127
return -EINVAL; 6128 } 6129 6130 if (old_plane_state->hw.pixel_blend_mode != 6131 new_plane_state->hw.pixel_blend_mode) { 6132 drm_dbg_kms(&i915->drm, 6133 "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n", 6134 plane->base.base.id, plane->base.name); 6135 return -EINVAL; 6136 } 6137 6138 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) { 6139 drm_dbg_kms(&i915->drm, 6140 "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n", 6141 plane->base.base.id, plane->base.name); 6142 return -EINVAL; 6143 } 6144 6145 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) { 6146 drm_dbg_kms(&i915->drm, 6147 "[PLANE:%d:%s] Color range cannot be changed in async flip\n", 6148 plane->base.base.id, plane->base.name); 6149 return -EINVAL; 6150 } 6151 6152 /* plane decryption is allowed to change only in synchronous flips */ 6153 if (old_plane_state->decrypt != new_plane_state->decrypt) { 6154 drm_dbg_kms(&i915->drm, 6155 "[PLANE:%d:%s] Decryption cannot be changed in async flip\n", 6156 plane->base.base.id, plane->base.name); 6157 return -EINVAL; 6158 } 6159 } 6160 6161 return 0; 6162 } 6163 6164 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state) 6165 { 6166 struct drm_i915_private *i915 = to_i915(state->base.dev); 6167 struct intel_crtc_state *crtc_state; 6168 struct intel_crtc *crtc; 6169 u8 affected_pipes = 0; 6170 u8 modeset_pipes = 0; 6171 int i; 6172 6173 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6174 affected_pipes |= crtc_state->bigjoiner_pipes; 6175 if (intel_crtc_needs_modeset(crtc_state)) 6176 modeset_pipes |= crtc_state->bigjoiner_pipes; 6177 } 6178 6179 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) { 6180 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 6181 if (IS_ERR(crtc_state)) 6182 return PTR_ERR(crtc_state); 6183 } 6184 6185 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) { 6186 int ret; 6187 6188 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6189 6190 crtc_state->uapi.mode_changed = true; 6191 6192 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 6193 if (ret) 6194 return ret; 6195 6196 ret = intel_atomic_add_affected_planes(state, crtc); 6197 if (ret) 6198 return ret; 6199 } 6200 6201 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6202 /* Kill old bigjoiner link, we may re-establish afterwards */ 6203 if (intel_crtc_needs_modeset(crtc_state) && 6204 intel_crtc_is_bigjoiner_master(crtc_state)) 6205 kill_bigjoiner_slave(state, crtc); 6206 } 6207 6208 return 0; 6209 } 6210 6211 static int intel_atomic_check_config(struct intel_atomic_state *state, 6212 struct intel_link_bw_limits *limits, 6213 enum pipe *failed_pipe) 6214 { 6215 struct drm_i915_private *i915 = to_i915(state->base.dev); 6216 struct intel_crtc_state *new_crtc_state; 6217 struct intel_crtc *crtc; 6218 int ret; 6219 int i; 6220 6221 *failed_pipe = INVALID_PIPE; 6222 6223 ret = intel_bigjoiner_add_affected_crtcs(state); 6224 if (ret) 6225 return ret; 6226 6227 ret = intel_fdi_add_affected_crtcs(state); 6228 if (ret) 6229 return ret; 6230 6231 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6232 if (!intel_crtc_needs_modeset(new_crtc_state)) { 6233 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) 6234 copy_bigjoiner_crtc_state_nomodeset(state, crtc); 6235 else 6236 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 6237 continue; 6238 } 6239 6240 if
(intel_crtc_is_bigjoiner_slave(new_crtc_state)) { 6241 drm_WARN_ON(&i915->drm, new_crtc_state->uapi.enable); 6242 continue; 6243 } 6244 6245 ret = intel_crtc_prepare_cleared_state(state, crtc); 6246 if (ret) 6247 break; 6248 6249 if (!new_crtc_state->hw.enable) 6250 continue; 6251 6252 ret = intel_modeset_pipe_config(state, crtc, limits); 6253 if (ret) 6254 break; 6255 6256 ret = intel_atomic_check_bigjoiner(state, crtc); 6257 if (ret) 6258 break; 6259 } 6260 6261 if (ret) 6262 *failed_pipe = crtc->pipe; 6263 6264 return ret; 6265 } 6266 6267 static int intel_atomic_check_config_and_link(struct intel_atomic_state *state) 6268 { 6269 struct intel_link_bw_limits new_limits; 6270 struct intel_link_bw_limits old_limits; 6271 int ret; 6272 6273 intel_link_bw_init_limits(state, &new_limits); 6274 old_limits = new_limits; 6275 6276 while (true) { 6277 enum pipe failed_pipe; 6278 6279 ret = intel_atomic_check_config(state, &new_limits, 6280 &failed_pipe); 6281 if (ret) { 6282 /* 6283 * The bpp limit for a pipe is below the minimum it supports; set the 6284 * limit to the minimum and recalculate the config. 6285 */ 6286 if (ret == -EINVAL && 6287 intel_link_bw_set_bpp_limit_for_pipe(state, 6288 &old_limits, 6289 &new_limits, 6290 failed_pipe)) 6291 continue; 6292 6293 break; 6294 } 6295 6296 old_limits = new_limits; 6297 6298 ret = intel_link_bw_atomic_check(state, &new_limits); 6299 if (ret != -EAGAIN) 6300 break; 6301 } 6302 6303 return ret; 6304 } 6305 /** 6306 * intel_atomic_check - validate state object 6307 * @dev: drm device 6308 * @_state: state to validate 6309 */ 6310 int intel_atomic_check(struct drm_device *dev, 6311 struct drm_atomic_state *_state) 6312 { 6313 struct drm_i915_private *dev_priv = to_i915(dev); 6314 struct intel_atomic_state *state = to_intel_atomic_state(_state); 6315 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6316 struct intel_crtc *crtc; 6317 int ret, i; 6318 bool any_ms = false; 6319 6320 if (!intel_display_driver_check_access(dev_priv)) 6321 return -ENODEV; 6322 6323 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6324 new_crtc_state, i) { 6325 /* 6326 * The crtc's state is no longer considered to be inherited 6327 * after the first userspace/client initiated commit. 6328 */ 6329 if (!state->internal) 6330 new_crtc_state->inherited = false; 6331 6332 if (new_crtc_state->inherited != old_crtc_state->inherited) 6333 new_crtc_state->uapi.mode_changed = true; 6334 6335 if (new_crtc_state->uapi.scaling_filter != 6336 old_crtc_state->uapi.scaling_filter) 6337 new_crtc_state->uapi.mode_changed = true; 6338 } 6339 6340 intel_vrr_check_modeset(state); 6341 6342 ret = drm_atomic_helper_check_modeset(dev, &state->base); 6343 if (ret) 6344 goto fail; 6345 6346 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6347 ret = intel_async_flip_check_uapi(state, crtc); 6348 if (ret) 6349 return ret; 6350 } 6351 6352 ret = intel_atomic_check_config_and_link(state); 6353 if (ret) 6354 goto fail; 6355 6356 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6357 new_crtc_state, i) { 6358 if (!intel_crtc_needs_modeset(new_crtc_state)) 6359 continue; 6360 6361 if (new_crtc_state->hw.enable) { 6362 ret = intel_modeset_pipe_config_late(state, crtc); 6363 if (ret) 6364 goto fail; 6365 } 6366 6367 intel_crtc_check_fastset(old_crtc_state, new_crtc_state); 6368 } 6369 6370 /* 6371 * Check if fastset is allowed by external dependencies like other 6372 * pipes and transcoders.
6373 * 6374 * Right now it only forces a full modeset when the MST master 6375 * transcoder did not change but the pipe of the master transcoder 6376 * needs a full modeset, in which case all slaves also need to do a full modeset. 6377 * Similarly, in case of port synced crtcs, if one of the synced crtcs 6378 * needs a full modeset, all other synced crtcs should be 6379 * forced to do a full modeset as well. 6380 */ 6381 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6382 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state)) 6383 continue; 6384 6385 if (intel_dp_mst_crtc_needs_modeset(state, crtc)) 6386 intel_crtc_flag_modeset(new_crtc_state); 6387 6388 if (intel_dp_mst_is_slave_trans(new_crtc_state)) { 6389 enum transcoder master = new_crtc_state->mst_master_transcoder; 6390 6391 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) 6392 intel_crtc_flag_modeset(new_crtc_state); 6393 } 6394 6395 if (is_trans_port_sync_mode(new_crtc_state)) { 6396 u8 trans = new_crtc_state->sync_mode_slaves_mask; 6397 6398 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER) 6399 trans |= BIT(new_crtc_state->master_transcoder); 6400 6401 if (intel_cpu_transcoders_need_modeset(state, trans)) 6402 intel_crtc_flag_modeset(new_crtc_state); 6403 } 6404 6405 if (new_crtc_state->bigjoiner_pipes) { 6406 if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) 6407 intel_crtc_flag_modeset(new_crtc_state); 6408 } 6409 } 6410 6411 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6412 new_crtc_state, i) { 6413 if (!intel_crtc_needs_modeset(new_crtc_state)) 6414 continue; 6415 6416 any_ms = true; 6417 6418 intel_release_shared_dplls(state, crtc); 6419 } 6420 6421 if (any_ms && !check_digital_port_conflicts(state)) { 6422 drm_dbg_kms(&dev_priv->drm, 6423 "rejecting conflicting digital port configuration\n"); 6424 ret = -EINVAL; 6425 goto fail; 6426 } 6427 6428 ret = intel_atomic_check_planes(state); 6429 if (ret) 6430 goto fail; 6431 6432 ret = intel_compute_global_watermarks(state); 6433 if (ret) 6434 goto fail; 6435 6436 ret = intel_bw_atomic_check(state); 6437 if (ret) 6438 goto fail; 6439 6440 ret = intel_cdclk_atomic_check(state, &any_ms); 6441 if (ret) 6442 goto fail; 6443 6444 if (intel_any_crtc_needs_modeset(state)) 6445 any_ms = true; 6446 6447 if (any_ms) { 6448 ret = intel_modeset_checks(state); 6449 if (ret) 6450 goto fail; 6451 6452 ret = intel_modeset_calc_cdclk(state); 6453 if (ret) 6454 return ret; 6455 } 6456 6457 ret = intel_pmdemand_atomic_check(state); 6458 if (ret) 6459 goto fail; 6460 6461 ret = intel_atomic_check_crtcs(state); 6462 if (ret) 6463 goto fail; 6464 6465 ret = intel_fbc_atomic_check(state); 6466 if (ret) 6467 goto fail; 6468 6469 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6470 new_crtc_state, i) { 6471 intel_color_assert_luts(new_crtc_state); 6472 6473 ret = intel_async_flip_check_hw(state, crtc); 6474 if (ret) 6475 goto fail; 6476 6477 /* Either full modeset or fastset (or neither), never both */ 6478 drm_WARN_ON(&dev_priv->drm, 6479 intel_crtc_needs_modeset(new_crtc_state) && 6480 intel_crtc_needs_fastset(new_crtc_state)); 6481 6482 if (!intel_crtc_needs_modeset(new_crtc_state) && 6483 !intel_crtc_needs_fastset(new_crtc_state)) 6484 continue; 6485 6486 intel_crtc_state_dump(new_crtc_state, state, 6487 intel_crtc_needs_modeset(new_crtc_state) ?
6488 "modeset" : "fastset"); 6489 } 6490 6491 return 0; 6492 6493 fail: 6494 if (ret == -EDEADLK) 6495 return ret; 6496 6497 /* 6498 * FIXME would probably be nice to know which crtc specifically 6499 * caused the failure, in cases where we can pinpoint it. 6500 */ 6501 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6502 new_crtc_state, i) 6503 intel_crtc_state_dump(new_crtc_state, state, "failed"); 6504 6505 return ret; 6506 } 6507 6508 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 6509 { 6510 struct intel_crtc_state *crtc_state; 6511 struct intel_crtc *crtc; 6512 int i, ret; 6513 6514 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base); 6515 if (ret < 0) 6516 return ret; 6517 6518 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6519 if (intel_crtc_needs_color_update(crtc_state)) 6520 intel_color_prepare_commit(crtc_state); 6521 } 6522 6523 return 0; 6524 } 6525 6526 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 6527 struct intel_crtc_state *crtc_state) 6528 { 6529 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6530 6531 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes) 6532 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 6533 6534 if (crtc_state->has_pch_encoder) { 6535 enum pipe pch_transcoder = 6536 intel_crtc_pch_transcoder(crtc); 6537 6538 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 6539 } 6540 } 6541 6542 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, 6543 const struct intel_crtc_state *new_crtc_state) 6544 { 6545 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 6546 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6547 6548 /* 6549 * Update pipe size and adjust fitter if needed: the reason for this is 6550 * that in compute_mode_changes we check the native mode (not the pfit 6551 * mode) to see if we can flip rather than do a full mode set. In the 6552 * fastboot case, we'll flip, but if we don't update the pipesrc and 6553 * pfit state, we'll end up with a big fb scanned out into the wrong 6554 * sized surface. 6555 */ 6556 intel_set_pipe_src_size(new_crtc_state); 6557 6558 /* on skylake this is done by detaching scalers */ 6559 if (DISPLAY_VER(dev_priv) >= 9) { 6560 if (new_crtc_state->pch_pfit.enabled) 6561 skl_pfit_enable(new_crtc_state); 6562 } else if (HAS_PCH_SPLIT(dev_priv)) { 6563 if (new_crtc_state->pch_pfit.enabled) 6564 ilk_pfit_enable(new_crtc_state); 6565 else if (old_crtc_state->pch_pfit.enabled) 6566 ilk_pfit_disable(old_crtc_state); 6567 } 6568 6569 /* 6570 * The register is supposedly single buffered so perhaps 6571 * not 100% correct to do this here. But SKL+ calculate 6572 * this based on the adjust pixel rate so pfit changes do 6573 * affect it and so it must be updated for fastsets. 6574 * HSW/BDW only really need this here for fastboot, after 6575 * that the value should not change without a full modeset. 
6576 */ 6577 if (DISPLAY_VER(dev_priv) >= 9 || 6578 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 6579 hsw_set_linetime_wm(new_crtc_state); 6580 6581 if (new_crtc_state->update_m_n) 6582 intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder, 6583 &new_crtc_state->dp_m_n); 6584 6585 if (new_crtc_state->update_lrr) 6586 intel_set_transcoder_timings_lrr(new_crtc_state); 6587 } 6588 6589 static void commit_pipe_pre_planes(struct intel_atomic_state *state, 6590 struct intel_crtc *crtc) 6591 { 6592 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6593 const struct intel_crtc_state *old_crtc_state = 6594 intel_atomic_get_old_crtc_state(state, crtc); 6595 const struct intel_crtc_state *new_crtc_state = 6596 intel_atomic_get_new_crtc_state(state, crtc); 6597 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 6598 6599 /* 6600 * During modesets the pipe configuration was programmed as the 6601 * CRTC was enabled. 6602 */ 6603 if (!modeset) { 6604 if (intel_crtc_needs_color_update(new_crtc_state)) 6605 intel_color_commit_arm(new_crtc_state); 6606 6607 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 6608 bdw_set_pipe_misc(new_crtc_state); 6609 6610 if (intel_crtc_needs_fastset(new_crtc_state)) 6611 intel_pipe_fastset(old_crtc_state, new_crtc_state); 6612 } 6613 6614 intel_psr2_program_trans_man_trk_ctl(new_crtc_state); 6615 6616 intel_atomic_update_watermarks(state, crtc); 6617 } 6618 6619 static void commit_pipe_post_planes(struct intel_atomic_state *state, 6620 struct intel_crtc *crtc) 6621 { 6622 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6623 const struct intel_crtc_state *old_crtc_state = 6624 intel_atomic_get_old_crtc_state(state, crtc); 6625 const struct intel_crtc_state *new_crtc_state = 6626 intel_atomic_get_new_crtc_state(state, crtc); 6627 6628 /* 6629 * Disable the scaler(s) after the plane(s) so that we don't 6630 * get a catastrophic underrun even if the two operations 6631 * end up happening in two different frames. 6632 */ 6633 if (DISPLAY_VER(dev_priv) >= 9 && 6634 !intel_crtc_needs_modeset(new_crtc_state)) 6635 skl_detach_scalers(new_crtc_state); 6636 6637 if (vrr_enabling(old_crtc_state, new_crtc_state)) 6638 intel_vrr_enable(new_crtc_state); 6639 } 6640 6641 static void intel_enable_crtc(struct intel_atomic_state *state, 6642 struct intel_crtc *crtc) 6643 { 6644 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6645 const struct intel_crtc_state *new_crtc_state = 6646 intel_atomic_get_new_crtc_state(state, crtc); 6647 6648 if (!intel_crtc_needs_modeset(new_crtc_state)) 6649 return; 6650 6651 /* VRR will be enabled later, if required */ 6652 intel_crtc_update_active_timings(new_crtc_state, false); 6653 6654 dev_priv->display.funcs.display->crtc_enable(state, crtc); 6655 6656 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) 6657 return; 6658 6659 /* vblanks work again, re-enable pipe CRC.
*/ 6660 intel_crtc_enable_pipe_crc(crtc); 6661 } 6662 6663 static void intel_pre_update_crtc(struct intel_atomic_state *state, 6664 struct intel_crtc *crtc) 6665 { 6666 struct drm_i915_private *i915 = to_i915(state->base.dev); 6667 const struct intel_crtc_state *old_crtc_state = 6668 intel_atomic_get_old_crtc_state(state, crtc); 6669 struct intel_crtc_state *new_crtc_state = 6670 intel_atomic_get_new_crtc_state(state, crtc); 6671 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 6672 6673 if (old_crtc_state->inherited || 6674 intel_crtc_needs_modeset(new_crtc_state)) { 6675 if (HAS_DPT(i915)) 6676 intel_dpt_configure(crtc); 6677 } 6678 6679 if (!modeset) { 6680 if (new_crtc_state->preload_luts && 6681 intel_crtc_needs_color_update(new_crtc_state)) 6682 intel_color_load_luts(new_crtc_state); 6683 6684 intel_pre_plane_update(state, crtc); 6685 6686 if (intel_crtc_needs_fastset(new_crtc_state)) 6687 intel_encoders_update_pipe(state, crtc); 6688 6689 if (DISPLAY_VER(i915) >= 11 && 6690 intel_crtc_needs_fastset(new_crtc_state)) 6691 icl_set_pipe_chicken(new_crtc_state); 6692 6693 if (vrr_params_changed(old_crtc_state, new_crtc_state)) 6694 intel_vrr_set_transcoder_timings(new_crtc_state); 6695 } 6696 6697 intel_fbc_update(state, crtc); 6698 6699 drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF)); 6700 6701 if (!modeset && 6702 intel_crtc_needs_color_update(new_crtc_state)) 6703 intel_color_commit_noarm(new_crtc_state); 6704 6705 intel_crtc_planes_update_noarm(state, crtc); 6706 } 6707 6708 static void intel_update_crtc(struct intel_atomic_state *state, 6709 struct intel_crtc *crtc) 6710 { 6711 const struct intel_crtc_state *old_crtc_state = 6712 intel_atomic_get_old_crtc_state(state, crtc); 6713 struct intel_crtc_state *new_crtc_state = 6714 intel_atomic_get_new_crtc_state(state, crtc); 6715 6716 /* Perform vblank evasion around commit operation */ 6717 intel_pipe_update_start(state, crtc); 6718 6719 commit_pipe_pre_planes(state, crtc); 6720 6721 intel_crtc_planes_update_arm(state, crtc); 6722 6723 commit_pipe_post_planes(state, crtc); 6724 6725 intel_pipe_update_end(state, crtc); 6726 6727 /* 6728 * VRR/Seamless M/N update may need to update frame timings. 6729 * 6730 * FIXME Should be synchronized with the start of vblank somehow... 6731 */ 6732 if (vrr_enabling(old_crtc_state, new_crtc_state) || 6733 new_crtc_state->update_m_n || new_crtc_state->update_lrr) 6734 intel_crtc_update_active_timings(new_crtc_state, 6735 new_crtc_state->vrr.enable); 6736 6737 /* 6738 * We usually enable FIFO underrun interrupts as part of the 6739 * CRTC enable sequence during modesets. But when we inherit a 6740 * valid pipe configuration from the BIOS we need to take care 6741 * of enabling them on the CRTC's first fastset. 6742 */ 6743 if (intel_crtc_needs_fastset(new_crtc_state) && 6744 old_crtc_state->inherited) 6745 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 6746 } 6747 6748 static void intel_old_crtc_state_disables(struct intel_atomic_state *state, 6749 struct intel_crtc_state *old_crtc_state, 6750 struct intel_crtc_state *new_crtc_state, 6751 struct intel_crtc *crtc) 6752 { 6753 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6754 6755 /* 6756 * We need to disable pipe CRC before disabling the pipe, 6757 * or we race against vblank off. 
6758 */ 6759 intel_crtc_disable_pipe_crc(crtc); 6760 6761 dev_priv->display.funcs.display->crtc_disable(state, crtc); 6762 crtc->active = false; 6763 intel_fbc_disable(crtc); 6764 6765 if (!new_crtc_state->hw.active) 6766 intel_initial_watermarks(state, crtc); 6767 } 6768 6769 static void intel_commit_modeset_disables(struct intel_atomic_state *state) 6770 { 6771 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 6772 struct intel_crtc *crtc; 6773 u32 handled = 0; 6774 int i; 6775 6776 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6777 new_crtc_state, i) { 6778 if (!intel_crtc_needs_modeset(new_crtc_state)) 6779 continue; 6780 6781 intel_pre_plane_update(state, crtc); 6782 6783 if (!old_crtc_state->hw.active) 6784 continue; 6785 6786 intel_crtc_disable_planes(state, crtc); 6787 } 6788 6789 /* Only disable port sync and MST slaves */ 6790 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6791 new_crtc_state, i) { 6792 if (!intel_crtc_needs_modeset(new_crtc_state)) 6793 continue; 6794 6795 if (!old_crtc_state->hw.active) 6796 continue; 6797 6798 /* In case of Transcoder port Sync master slave CRTCs can be 6799 * assigned in any order and we need to make sure that 6800 * slave CRTCs are disabled first and then master CRTC since 6801 * Slave vblanks are masked till Master Vblanks. 6802 */ 6803 if (!is_trans_port_sync_slave(old_crtc_state) && 6804 !intel_dp_mst_is_slave_trans(old_crtc_state) && 6805 !intel_crtc_is_bigjoiner_slave(old_crtc_state)) 6806 continue; 6807 6808 intel_old_crtc_state_disables(state, old_crtc_state, 6809 new_crtc_state, crtc); 6810 handled |= BIT(crtc->pipe); 6811 } 6812 6813 /* Disable everything else left on */ 6814 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6815 new_crtc_state, i) { 6816 if (!intel_crtc_needs_modeset(new_crtc_state) || 6817 (handled & BIT(crtc->pipe))) 6818 continue; 6819 6820 if (!old_crtc_state->hw.active) 6821 continue; 6822 6823 intel_old_crtc_state_disables(state, old_crtc_state, 6824 new_crtc_state, crtc); 6825 } 6826 } 6827 6828 static void intel_commit_modeset_enables(struct intel_atomic_state *state) 6829 { 6830 struct intel_crtc_state *new_crtc_state; 6831 struct intel_crtc *crtc; 6832 int i; 6833 6834 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6835 if (!new_crtc_state->hw.active) 6836 continue; 6837 6838 intel_enable_crtc(state, crtc); 6839 intel_pre_update_crtc(state, crtc); 6840 } 6841 6842 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6843 if (!new_crtc_state->hw.active) 6844 continue; 6845 6846 intel_update_crtc(state, crtc); 6847 } 6848 } 6849 6850 static void skl_commit_modeset_enables(struct intel_atomic_state *state) 6851 { 6852 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6853 struct intel_crtc *crtc; 6854 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6855 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 6856 u8 update_pipes = 0, modeset_pipes = 0; 6857 int i; 6858 6859 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 6860 enum pipe pipe = crtc->pipe; 6861 6862 if (!new_crtc_state->hw.active) 6863 continue; 6864 6865 /* ignore allocations for crtc's that have been turned off. 
*/ 6866 if (!intel_crtc_needs_modeset(new_crtc_state)) { 6867 entries[pipe] = old_crtc_state->wm.skl.ddb; 6868 update_pipes |= BIT(pipe); 6869 } else { 6870 modeset_pipes |= BIT(pipe); 6871 } 6872 } 6873 6874 /* 6875 * Whenever the number of active pipes changes, we need to make sure we 6876 * update the pipes in the right order so that their ddb allocations 6877 * never overlap with each other between CRTC updates. Otherwise we'll 6878 * cause pipe underruns and other bad stuff. 6879 * 6880 * So first let's enable all pipes that do not need a full modeset as 6881 * those don't have any external dependency. 6882 */ 6883 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6884 enum pipe pipe = crtc->pipe; 6885 6886 if ((update_pipes & BIT(pipe)) == 0) 6887 continue; 6888 6889 intel_pre_update_crtc(state, crtc); 6890 } 6891 6892 while (update_pipes) { 6893 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6894 new_crtc_state, i) { 6895 enum pipe pipe = crtc->pipe; 6896 6897 if ((update_pipes & BIT(pipe)) == 0) 6898 continue; 6899 6900 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 6901 entries, I915_MAX_PIPES, pipe)) 6902 continue; 6903 6904 entries[pipe] = new_crtc_state->wm.skl.ddb; 6905 update_pipes &= ~BIT(pipe); 6906 6907 intel_update_crtc(state, crtc); 6908 6909 /* 6910 * If this is an already active pipe, its DDB changed, 6911 * and this isn't the last pipe that needs updating 6912 * then we need to wait for a vblank to pass for the 6913 * new ddb allocation to take effect. 6914 */ 6915 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, 6916 &old_crtc_state->wm.skl.ddb) && 6917 (update_pipes | modeset_pipes)) 6918 intel_crtc_wait_for_next_vblank(crtc); 6919 } 6920 } 6921 6922 update_pipes = modeset_pipes; 6923 6924 /* 6925 * Enable all pipes that need a modeset and do not depend on other 6926 * pipes 6927 */ 6928 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6929 enum pipe pipe = crtc->pipe; 6930 6931 if ((modeset_pipes & BIT(pipe)) == 0) 6932 continue; 6933 6934 if (intel_dp_mst_is_slave_trans(new_crtc_state) || 6935 is_trans_port_sync_master(new_crtc_state) || 6936 intel_crtc_is_bigjoiner_master(new_crtc_state)) 6937 continue; 6938 6939 modeset_pipes &= ~BIT(pipe); 6940 6941 intel_enable_crtc(state, crtc); 6942 } 6943 6944 /* 6945 * Then we enable all remaining pipes that depend on other 6946 * pipes: MST slaves and port sync masters, big joiner master 6947 */ 6948 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6949 enum pipe pipe = crtc->pipe; 6950 6951 if ((modeset_pipes & BIT(pipe)) == 0) 6952 continue; 6953 6954 modeset_pipes &= ~BIT(pipe); 6955 6956 intel_enable_crtc(state, crtc); 6957 } 6958 6959 /* 6960 * Finally we do the plane updates/etc. for all pipes that got enabled.
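 *
 * (Worked example of the ordering above, with made-up numbers: assume
 * pipe C turns off and its space is handed to A and B, going from
 * A=[0,341], B=[342,683] to A=[0,511], B=[512,1023]. A's new
 * allocation overlaps B's old one, so the retry loop updates B first,
 * waits a vblank for B to vacate [342,683], and only then lets A grow
 * into [0,511].)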
6961 */ 6962 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6963 enum pipe pipe = crtc->pipe; 6964 6965 if ((update_pipes & BIT(pipe)) == 0) 6966 continue; 6967 6968 intel_pre_update_crtc(state, crtc); 6969 } 6970 6971 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6972 enum pipe pipe = crtc->pipe; 6973 6974 if ((update_pipes & BIT(pipe)) == 0) 6975 continue; 6976 6977 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 6978 entries, I915_MAX_PIPES, pipe)); 6979 6980 entries[pipe] = new_crtc_state->wm.skl.ddb; 6981 update_pipes &= ~BIT(pipe); 6982 6983 intel_update_crtc(state, crtc); 6984 } 6985 6986 drm_WARN_ON(&dev_priv->drm, modeset_pipes); 6987 drm_WARN_ON(&dev_priv->drm, update_pipes); 6988 } 6989 6990 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 6991 { 6992 struct drm_i915_private *i915 = to_i915(intel_state->base.dev); 6993 struct drm_plane *plane; 6994 struct drm_plane_state *new_plane_state; 6995 int ret, i; 6996 6997 for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) { 6998 if (new_plane_state->fence) { 6999 ret = dma_fence_wait_timeout(new_plane_state->fence, false, 7000 i915_fence_timeout(i915)); 7001 if (ret <= 0) 7002 break; 7003 7004 dma_fence_put(new_plane_state->fence); 7005 new_plane_state->fence = NULL; 7006 } 7007 } 7008 } 7009 7010 static void intel_atomic_cleanup_work(struct work_struct *work) 7011 { 7012 struct intel_atomic_state *state = 7013 container_of(work, struct intel_atomic_state, base.commit_work); 7014 struct drm_i915_private *i915 = to_i915(state->base.dev); 7015 struct intel_crtc_state *old_crtc_state; 7016 struct intel_crtc *crtc; 7017 int i; 7018 7019 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) 7020 intel_color_cleanup_commit(old_crtc_state); 7021 7022 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base); 7023 drm_atomic_helper_commit_cleanup_done(&state->base); 7024 drm_atomic_state_put(&state->base); 7025 } 7026 7027 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state) 7028 { 7029 struct drm_i915_private *i915 = to_i915(state->base.dev); 7030 struct intel_plane *plane; 7031 struct intel_plane_state *plane_state; 7032 int i; 7033 7034 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 7035 struct drm_framebuffer *fb = plane_state->hw.fb; 7036 int cc_plane; 7037 int ret; 7038 7039 if (!fb) 7040 continue; 7041 7042 cc_plane = intel_fb_rc_ccs_cc_plane(fb); 7043 if (cc_plane < 0) 7044 continue; 7045 7046 /* 7047 * The layout of the fast clear color value expected by HW 7048 * (the DRM ABI requires this value to be located in the fb at 7049 * offset 0 of the cc plane, which is plane #2 on previous generations 7050 * or plane #1 for flat CCS): 7051 * - 4 x 4 bytes per-channel value 7052 * (in surface type specific float/int format provided by the fb user) 7053 * - 8 bytes native color value used by the display 7054 * (converted/written by GPU during a fast clear operation using the 7055 * above per-channel values) 7056 * 7057 * The commit's FB prepare hook already ensured that the FB obj is pinned, and the 7058 * caller made sure that the object is synced wrt. the related color clear value 7059 * GPU write on it. 7060 */ 7061 ret = i915_gem_object_read_from_page(intel_fb_obj(fb), 7062 fb->offsets[cc_plane] + 16, 7063 &plane_state->ccval, 7064 sizeof(plane_state->ccval)); 7065 /* The above could only fail if the FB obj has an unexpected backing store type.
*/ 7066 drm_WARN_ON(&i915->drm, ret); 7067 } 7068 } 7069 7070 static void intel_atomic_commit_tail(struct intel_atomic_state *state) 7071 { 7072 struct drm_device *dev = state->base.dev; 7073 struct drm_i915_private *dev_priv = to_i915(dev); 7074 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 7075 struct intel_crtc *crtc; 7076 struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {}; 7077 intel_wakeref_t wakeref = 0; 7078 int i; 7079 7080 intel_atomic_commit_fence_wait(state); 7081 7082 drm_atomic_helper_wait_for_dependencies(&state->base); 7083 drm_dp_mst_atomic_wait_for_dependencies(&state->base); 7084 intel_atomic_global_state_wait_for_dependencies(state); 7085 7086 /* 7087 * During full modesets we write a lot of registers, wait 7088 * for PLLs, etc. Doing that while DC states are enabled 7089 * is not a good idea. 7090 * 7091 * During fastsets and other updates we also need to 7092 * disable DC states due to the following scenario: 7093 * 1. DC5 exit and PSR exit happen 7094 * 2. Some or all _noarm() registers are written 7095 * 3. Due to some long delay PSR is re-entered 7096 * 4. DC5 entry -> DMC saves the already written new 7097 * _noarm() registers and the old not yet written 7098 * _arm() registers 7099 * 5. DC5 exit -> DMC restores a mixture of old and 7100 * new register values and arms the update 7101 * 6. PSR exit -> hardware latches a mixture of old and 7102 * new register values -> corrupted frame, or worse 7103 * 7. New _arm() registers are finally written 7104 * 8. Hardware finally latches a complete set of new 7105 * register values, and subsequent frames will be OK again 7106 * 7107 * Also note that due to the pipe CSC hardware issues on 7108 * SKL/GLK DC states must remain off until the pipe CSC 7109 * state readout has happened. Otherwise we risk corrupting 7110 * the CSC latched register values with the readout (see 7111 * skl_read_csc() and skl_color_commit_noarm()). 7112 */ 7113 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF); 7114 7115 intel_atomic_prepare_plane_clear_colors(state); 7116 7117 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7118 new_crtc_state, i) { 7119 if (intel_crtc_needs_modeset(new_crtc_state) || 7120 intel_crtc_needs_fastset(new_crtc_state)) 7121 intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]); 7122 } 7123 7124 intel_commit_modeset_disables(state); 7125 7126 intel_dp_tunnel_atomic_alloc_bw(state); 7127 7128 /* FIXME: Eventually get rid of our crtc->config pointer */ 7129 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7130 crtc->config = new_crtc_state; 7131 7132 /* 7133 * In XE_LPD+ Pmdemand combines many parameters such as voltage index, 7134 * PLLs, cdclk frequency, the QGV point selection parameter, etc. Voltage 7135 * index and cdclk/ddiclk frequencies are supposed to be configured before 7136 * the cdclk config is set. 7137 */ 7138 intel_pmdemand_pre_plane_update(state); 7139 7140 if (state->modeset) { 7141 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); 7142 7143 intel_set_cdclk_pre_plane_update(state); 7144 7145 intel_modeset_verify_disabled(state); 7146 } 7147 7148 intel_sagv_pre_plane_update(state); 7149 7150 /* Complete the events for pipes that have now been disabled */ 7151 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7152 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 7153 7154 /* Complete events for now disabled pipes here.

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);
	intel_mbus_dbox_update(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.funcs.display->commit_modeset_enables(state);

	if (state->modeset)
		intel_set_cdclk_post_plane_update(state);

	intel_wait_for_vblank_workers(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_disable_flip_done(state, crtc);

		intel_color_wait_commit(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		intel_optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(state, crtc);

		/* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
		hsw_ips_post_update(state, crtc);

		/*
		 * Activate DRRS after state readout to avoid
		 * dp_m_n vs. dp_m2_n2 confusion on BDW+.
		 */
		intel_drrs_activate(new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work, aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done, and later do the dsb cleanup in cleanup_work.
		 *
		 * FIXME get rid of this funny new->old swapping
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);
	intel_pmdemand_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);
	intel_atomic_global_state_commit_done(state);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
	}

	/*
	 * Delay re-enabling DC states by 17 ms to avoid the off->on->off
	 * toggling overhead at and above 60 FPS.
	 */
	intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17);
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker so as not
	 * to impede the current task (userspace, for blocking modesets) that
	 * executes the commit inline. For out-of-line asynchronous
	 * modesets/flips, deferring to a new worker seems overkill, but we
	 * would place a schedule point (cond_resched()) here anyway to keep
	 * latencies down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
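
/* Worker that runs the commit tail for nonblocking commits. */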
static void intel_atomic_commit_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);

	intel_atomic_commit_tail(state);
}

static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}

static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock)
{
	int ret;

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (ret)
		return ret;

	ret = intel_atomic_global_state_setup_commit(state);
	if (ret)
		return ret;

	return 0;
}

static int intel_atomic_swap_state(struct intel_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_swap_state(&state->base, true);
	if (ret)
		return ret;

	intel_atomic_swap_global_state(state);

	intel_shared_dpll_swap_state(state);

	intel_atomic_track_fbs(state);

	return 0;
}
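
/*
 * Top-level entry point for committing an atomic state: set up the
 * commit, swap in the new state, then run the commit tail inline for
 * blocking commits or queue it on a workqueue for nonblocking ones.
 */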
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
			bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = intel_atomic_setup_commit(state, nonblock);
	if (!ret)
		ret = intel_atomic_swap_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_color_cleanup_commit(new_crtc_state);

		drm_atomic_helper_unprepare_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	if (nonblock && state->modeset) {
		queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
	} else {
		if (state->modeset)
			flush_workqueue(dev_priv->display.wq.modeset);
		intel_atomic_commit_tail(state);
	}

	return 0;
}

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
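
/* ioctl: report which hardware pipe is driving the given CRTC id. */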
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	u32 possible_clones = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			possible_clones |= drm_encoder_mask(&source_encoder->base);
	}

	return possible_clones;
}

static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc;
	u32 possible_crtcs = 0;

	for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
		possible_crtcs |= drm_crtc_mask(&crtc->base);

	return possible_crtcs;
}

static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9)
		return false;

	if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->display.vbt.int_crt_support)
		return false;

	return true;
}

bool assert_port_valid(struct drm_i915_private *i915, enum port port)
{
	return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)),
			 "Platform does not support port %c\n", port_name(port));
}
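
/*
 * Probe for and register the output encoders (DDI, DP, HDMI, LVDS, SDVO,
 * TV, DSI, ...) present on this platform, then fill in the possible_crtcs
 * and possible_clones masks for each of them.
 */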
7547 */ 7548 intel_lvds_init(dev_priv); 7549 intel_crt_init(dev_priv); 7550 7551 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); 7552 7553 if (ilk_has_edp_a(dev_priv)) 7554 g4x_dp_init(dev_priv, DP_A, PORT_A); 7555 7556 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) { 7557 /* PCH SDVOB multiplex with HDMIB */ 7558 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 7559 if (!found) 7560 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 7561 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED)) 7562 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B); 7563 } 7564 7565 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED) 7566 g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 7567 7568 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED) 7569 g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 7570 7571 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED) 7572 g4x_dp_init(dev_priv, PCH_DP_C, PORT_C); 7573 7574 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED) 7575 g4x_dp_init(dev_priv, PCH_DP_D, PORT_D); 7576 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7577 bool has_edp, has_port; 7578 7579 if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support) 7580 intel_crt_init(dev_priv); 7581 7582 /* 7583 * The DP_DETECTED bit is the latched state of the DDC 7584 * SDA pin at boot. However since eDP doesn't require DDC 7585 * (no way to plug in a DP->HDMI dongle) the DDC pins for 7586 * eDP ports may have been muxed to an alternate function. 7587 * Thus we can't rely on the DP_DETECTED bit alone to detect 7588 * eDP ports. Consult the VBT as well as DP_DETECTED to 7589 * detect eDP ports. 7590 * 7591 * Sadly the straps seem to be missing sometimes even for HDMI 7592 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap 7593 * and VBT for the presence of the port. Additionally we can't 7594 * trust the port type the VBT declares as we've seen at least 7595 * HDMI ports that the VBT claim are DP or eDP. 
7596 */ 7597 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); 7598 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 7599 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port) 7600 has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B); 7601 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 7602 g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 7603 7604 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); 7605 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 7606 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port) 7607 has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C); 7608 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 7609 g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 7610 7611 if (IS_CHERRYVIEW(dev_priv)) { 7612 /* 7613 * eDP not supported on port D, 7614 * so no need to worry about it 7615 */ 7616 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 7617 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port) 7618 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D); 7619 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port) 7620 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 7621 } 7622 7623 vlv_dsi_init(dev_priv); 7624 } else if (IS_PINEVIEW(dev_priv)) { 7625 intel_lvds_init(dev_priv); 7626 intel_crt_init(dev_priv); 7627 } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) { 7628 bool found = false; 7629 7630 if (IS_MOBILE(dev_priv)) 7631 intel_lvds_init(dev_priv); 7632 7633 intel_crt_init(dev_priv); 7634 7635 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 7636 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n"); 7637 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 7638 if (!found && IS_G4X(dev_priv)) { 7639 drm_dbg_kms(&dev_priv->drm, 7640 "probing HDMI on SDVOB\n"); 7641 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 7642 } 7643 7644 if (!found && IS_G4X(dev_priv)) 7645 g4x_dp_init(dev_priv, DP_B, PORT_B); 7646 } 7647 7648 /* Before G4X SDVOC doesn't have its own detect register */ 7649 7650 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 7651 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n"); 7652 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 7653 } 7654 7655 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) { 7656 7657 if (IS_G4X(dev_priv)) { 7658 drm_dbg_kms(&dev_priv->drm, 7659 "probing HDMI on SDVOC\n"); 7660 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 7661 } 7662 if (IS_G4X(dev_priv)) 7663 g4x_dp_init(dev_priv, DP_C, PORT_C); 7664 } 7665 7666 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED)) 7667 g4x_dp_init(dev_priv, DP_D, PORT_D); 7668 7669 if (SUPPORTS_TV(dev_priv)) 7670 intel_tv_init(dev_priv); 7671 } else if (DISPLAY_VER(dev_priv) == 2) { 7672 if (IS_I85X(dev_priv)) 7673 intel_lvds_init(dev_priv); 7674 7675 intel_crt_init(dev_priv); 7676 intel_dvo_init(dev_priv); 7677 } 7678 7679 for_each_intel_encoder(&dev_priv->drm, encoder) { 7680 encoder->base.possible_crtcs = 7681 intel_encoder_possible_crtcs(encoder); 7682 encoder->base.possible_clones = 7683 intel_encoder_possible_clones(encoder); 7684 } 7685 7686 intel_init_pch_refclk(dev_priv); 7687 7688 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 7689 } 7690 7691 static int max_dotclock(struct drm_i915_private *i915) 7692 { 7693 int max_dotclock = i915->max_dotclk_freq; 7694 7695 /* icl+ might use bigjoiner */ 7696 if (DISPLAY_VER(i915) >= 11) 7697 max_dotclock *= 2; 7698 7699 return max_dotclock; 7700 } 7701 7702 enum drm_mode_status 
enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/*
	 * Reject clearly excessive dotclocks early to
	 * avoid having to worry about huge integers later.
	 */
	if (mode->clock > max_dotclock(dev_priv))
		return MODE_CLOCK_HIGH;

	/* Transcoder timing limits */
	if (DISPLAY_VER(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
						     const struct drm_display_mode *mode)
{
	/*
	 * Additional transcoder timing limits,
	 * excluding BXT/GLK DSI transcoders.
	 */
	if (DISPLAY_VER(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
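
/*
 * Filter out modes that exceed the maximum plane size, so that every mode
 * we advertise can at least be handled by a fullscreen plane.
 */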
7809 */ 7810 if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) && 7811 mode->hsync_start == mode->hdisplay) 7812 return MODE_H_ILLEGAL; 7813 7814 return MODE_OK; 7815 } 7816 7817 enum drm_mode_status 7818 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, 7819 const struct drm_display_mode *mode, 7820 bool bigjoiner) 7821 { 7822 int plane_width_max, plane_height_max; 7823 7824 /* 7825 * intel_mode_valid() should be 7826 * sufficient on older platforms. 7827 */ 7828 if (DISPLAY_VER(dev_priv) < 9) 7829 return MODE_OK; 7830 7831 /* 7832 * Most people will probably want a fullscreen 7833 * plane so let's not advertize modes that are 7834 * too big for that. 7835 */ 7836 if (DISPLAY_VER(dev_priv) >= 11) { 7837 plane_width_max = 5120 << bigjoiner; 7838 plane_height_max = 4320; 7839 } else { 7840 plane_width_max = 5120; 7841 plane_height_max = 4096; 7842 } 7843 7844 if (mode->hdisplay > plane_width_max) 7845 return MODE_H_ILLEGAL; 7846 7847 if (mode->vdisplay > plane_height_max) 7848 return MODE_V_ILLEGAL; 7849 7850 return MODE_OK; 7851 } 7852 7853 static const struct intel_display_funcs skl_display_funcs = { 7854 .get_pipe_config = hsw_get_pipe_config, 7855 .crtc_enable = hsw_crtc_enable, 7856 .crtc_disable = hsw_crtc_disable, 7857 .commit_modeset_enables = skl_commit_modeset_enables, 7858 .get_initial_plane_config = skl_get_initial_plane_config, 7859 .fixup_initial_plane_config = skl_fixup_initial_plane_config, 7860 }; 7861 7862 static const struct intel_display_funcs ddi_display_funcs = { 7863 .get_pipe_config = hsw_get_pipe_config, 7864 .crtc_enable = hsw_crtc_enable, 7865 .crtc_disable = hsw_crtc_disable, 7866 .commit_modeset_enables = intel_commit_modeset_enables, 7867 .get_initial_plane_config = i9xx_get_initial_plane_config, 7868 .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 7869 }; 7870 7871 static const struct intel_display_funcs pch_split_display_funcs = { 7872 .get_pipe_config = ilk_get_pipe_config, 7873 .crtc_enable = ilk_crtc_enable, 7874 .crtc_disable = ilk_crtc_disable, 7875 .commit_modeset_enables = intel_commit_modeset_enables, 7876 .get_initial_plane_config = i9xx_get_initial_plane_config, 7877 .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 7878 }; 7879 7880 static const struct intel_display_funcs vlv_display_funcs = { 7881 .get_pipe_config = i9xx_get_pipe_config, 7882 .crtc_enable = valleyview_crtc_enable, 7883 .crtc_disable = i9xx_crtc_disable, 7884 .commit_modeset_enables = intel_commit_modeset_enables, 7885 .get_initial_plane_config = i9xx_get_initial_plane_config, 7886 .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 7887 }; 7888 7889 static const struct intel_display_funcs i9xx_display_funcs = { 7890 .get_pipe_config = i9xx_get_pipe_config, 7891 .crtc_enable = i9xx_crtc_enable, 7892 .crtc_disable = i9xx_crtc_disable, 7893 .commit_modeset_enables = intel_commit_modeset_enables, 7894 .get_initial_plane_config = i9xx_get_initial_plane_config, 7895 .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 7896 }; 7897 7898 /** 7899 * intel_init_display_hooks - initialize the display modesetting hooks 7900 * @dev_priv: device private 7901 */ 7902 void intel_init_display_hooks(struct drm_i915_private *dev_priv) 7903 { 7904 if (DISPLAY_VER(dev_priv) >= 9) { 7905 dev_priv->display.funcs.display = &skl_display_funcs; 7906 } else if (HAS_DDI(dev_priv)) { 7907 dev_priv->display.funcs.display = &ddi_display_funcs; 7908 } else if (HAS_PCH_SPLIT(dev_priv)) { 7909 dev_priv->display.funcs.display = 
int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
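
/*
 * Enable a pipe with a fixed 640x480@60Hz mode, bypassing the atomic
 * machinery; paired with i830_disable_pipe() below for the pipe force
 * quirk handling.
 */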
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	enum transcoder cpu_transcoder = (enum transcoder)pipe;
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
		       HACTIVE(640 - 1) | HTOTAL(800 - 1));
	intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
		       HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
	intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
		       HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
	intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
		       VACTIVE(480 - 1) | VTOTAL(525 - 1));
	intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
		       VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
	intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
		       VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
	intel_de_write(dev_priv, PIPESRC(pipe),
		       PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}

void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(dev_priv, TRANSCONF(pipe), 0);
	intel_de_posting_read(dev_priv, TRANSCONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}

void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func &&
		    cancel_work_sync(&connector->modeset_retry_work))
			drm_connector_put(&connector->base);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
}