1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 */ 26 27 #include <linux/dma-resv.h> 28 #include <linux/i2c.h> 29 #include <linux/input.h> 30 #include <linux/kernel.h> 31 #include <linux/module.h> 32 #include <linux/slab.h> 33 #include <linux/string_helpers.h> 34 35 #include <drm/display/drm_dp_helper.h> 36 #include <drm/display/drm_dp_tunnel.h> 37 #include <drm/drm_atomic.h> 38 #include <drm/drm_atomic_helper.h> 39 #include <drm/drm_atomic_uapi.h> 40 #include <drm/drm_damage_helper.h> 41 #include <drm/drm_edid.h> 42 #include <drm/drm_fixed.h> 43 #include <drm/drm_fourcc.h> 44 #include <drm/drm_probe_helper.h> 45 #include <drm/drm_rect.h> 46 #include <drm/drm_vblank.h> 47 48 #include "g4x_dp.h" 49 #include "g4x_hdmi.h" 50 #include "hsw_ips.h" 51 #include "i915_config.h" 52 #include "i915_drv.h" 53 #include "i915_reg.h" 54 #include "i915_utils.h" 55 #include "i9xx_plane.h" 56 #include "i9xx_plane_regs.h" 57 #include "i9xx_wm.h" 58 #include "intel_atomic.h" 59 #include "intel_atomic_plane.h" 60 #include "intel_audio.h" 61 #include "intel_bo.h" 62 #include "intel_bw.h" 63 #include "intel_cdclk.h" 64 #include "intel_clock_gating.h" 65 #include "intel_color.h" 66 #include "intel_crt.h" 67 #include "intel_crtc.h" 68 #include "intel_crtc_state_dump.h" 69 #include "intel_cursor_regs.h" 70 #include "intel_cx0_phy.h" 71 #include "intel_cursor.h" 72 #include "intel_ddi.h" 73 #include "intel_de.h" 74 #include "intel_display_driver.h" 75 #include "intel_display_power.h" 76 #include "intel_display_types.h" 77 #include "intel_dmc.h" 78 #include "intel_dp.h" 79 #include "intel_dp_link_training.h" 80 #include "intel_dp_mst.h" 81 #include "intel_dp_tunnel.h" 82 #include "intel_dpll.h" 83 #include "intel_dpll_mgr.h" 84 #include "intel_dpt.h" 85 #include "intel_dpt_common.h" 86 #include "intel_drrs.h" 87 #include "intel_dsb.h" 88 #include "intel_dsi.h" 89 #include "intel_dvo.h" 90 #include "intel_fb.h" 91 #include "intel_fbc.h" 92 #include "intel_fdi.h" 93 #include 
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_tdf.h"
#include "intel_tv.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
#include "intel_vga.h"
#include "intel_vrr.h"
#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"

/* Forward declarations for helpers defined later in this file. */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
			      const struct intel_crtc_state *crtc_state);

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	/* Table indexed by the fused HPLL frequency selector (in MHz). */
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

/*
 * Read one of the CCK clock control registers and derive the resulting
 * clock frequency in kHz from @ref_freq (kHz) and the programmed divider:
 * freq = ref * 2 / (divider + 1), rounded to nearest.
 *
 * Warns if the status field indicates a divider change is still in
 * progress. Caller is expected to hold the CCK sideband (see
 * vlv_get_cck_clock_hpll()).
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

/*
 * Like vlv_get_cck_clock(), but uses the HPLL VCO frequency (lazily
 * cached in dev_priv->hpll_freq) as the reference clock and takes the
 * CCK sideband lock itself.
 */
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

/* Cache the CZ clock frequency; only relevant on VLV/CHV platforms. */
void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

/*
 * "HDR mode": only HDR-capable planes (plus the cursor) are active
 * on the pipe.
 */
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	/* Toggle DUPS1/DUPS2 clock gating disable bits together. */
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DUPS1_GATING_DIS | DUPS2_GATING_DIS,
		     enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DPFR_GATING_DIS,
		     enable ? DPFR_GATING_DIS : 0);
}
DPFR_GATING_DIS : 0); 219 } 220 221 /* Wa_1604331009:icl,jsl,ehl */ 222 static void 223 icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 224 bool enable) 225 { 226 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 227 CURSOR_GATING_DIS, 228 enable ? CURSOR_GATING_DIS : 0); 229 } 230 231 static bool 232 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state) 233 { 234 return crtc_state->master_transcoder != INVALID_TRANSCODER; 235 } 236 237 bool 238 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state) 239 { 240 return crtc_state->sync_mode_slaves_mask != 0; 241 } 242 243 bool 244 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) 245 { 246 return is_trans_port_sync_master(crtc_state) || 247 is_trans_port_sync_slave(crtc_state); 248 } 249 250 static enum pipe joiner_primary_pipe(const struct intel_crtc_state *crtc_state) 251 { 252 return ffs(crtc_state->joiner_pipes) - 1; 253 } 254 255 /* 256 * The following helper functions, despite being named for bigjoiner, 257 * are applicable to both bigjoiner and uncompressed joiner configurations. 
258 */ 259 static bool is_bigjoiner(const struct intel_crtc_state *crtc_state) 260 { 261 return hweight8(crtc_state->joiner_pipes) >= 2; 262 } 263 264 static u8 bigjoiner_primary_pipes(const struct intel_crtc_state *crtc_state) 265 { 266 if (!is_bigjoiner(crtc_state)) 267 return 0; 268 269 return crtc_state->joiner_pipes & (0b01010101 << joiner_primary_pipe(crtc_state)); 270 } 271 272 static unsigned int bigjoiner_secondary_pipes(const struct intel_crtc_state *crtc_state) 273 { 274 if (!is_bigjoiner(crtc_state)) 275 return 0; 276 277 return crtc_state->joiner_pipes & (0b10101010 << joiner_primary_pipe(crtc_state)); 278 } 279 280 bool intel_crtc_is_bigjoiner_primary(const struct intel_crtc_state *crtc_state) 281 { 282 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 283 284 if (!is_bigjoiner(crtc_state)) 285 return false; 286 287 return BIT(crtc->pipe) & bigjoiner_primary_pipes(crtc_state); 288 } 289 290 bool intel_crtc_is_bigjoiner_secondary(const struct intel_crtc_state *crtc_state) 291 { 292 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 293 294 if (!is_bigjoiner(crtc_state)) 295 return false; 296 297 return BIT(crtc->pipe) & bigjoiner_secondary_pipes(crtc_state); 298 } 299 300 u8 _intel_modeset_primary_pipes(const struct intel_crtc_state *crtc_state) 301 { 302 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 303 304 if (!is_bigjoiner(crtc_state)) 305 return BIT(crtc->pipe); 306 307 return bigjoiner_primary_pipes(crtc_state); 308 } 309 310 u8 _intel_modeset_secondary_pipes(const struct intel_crtc_state *crtc_state) 311 { 312 return bigjoiner_secondary_pipes(crtc_state); 313 } 314 315 bool intel_crtc_is_ultrajoiner(const struct intel_crtc_state *crtc_state) 316 { 317 return intel_crtc_num_joined_pipes(crtc_state) >= 4; 318 } 319 320 static u8 ultrajoiner_primary_pipes(const struct intel_crtc_state *crtc_state) 321 { 322 if (!intel_crtc_is_ultrajoiner(crtc_state)) 323 return 0; 324 325 return crtc_state->joiner_pipes & 
/* True if @crtc_state's own pipe is an ultrajoiner primary pipe. */
bool intel_crtc_is_ultrajoiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return intel_crtc_is_ultrajoiner(crtc_state) &&
		BIT(crtc->pipe) & ultrajoiner_primary_pipes(crtc_state);
}

/*
 * The ultrajoiner enable bit doesn't seem to follow primary/secondary logic or
 * any other logic, so lets just add helper function to
 * at least hide this hassle..
 */
static u8 ultrajoiner_enable_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!intel_crtc_is_ultrajoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b01110111 << joiner_primary_pipe(crtc_state));
}

/* True if @crtc_state's own pipe needs its ultrajoiner enable bit set. */
bool intel_crtc_ultrajoiner_enable_needed(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return intel_crtc_is_ultrajoiner(crtc_state) &&
		BIT(crtc->pipe) & ultrajoiner_enable_pipes(crtc_state);
}

/* All joined pipes except the primary; 0 when no joiner is in use. */
u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->joiner_pipes)
		return crtc_state->joiner_pipes & ~BIT(joiner_primary_pipe(crtc_state));
	else
		return 0;
}

/* True if this crtc is a joiner secondary (its pipe is not the primary). */
bool intel_crtc_is_joiner_secondary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe != joiner_primary_pipe(crtc_state);
}

/* True if this crtc is the joiner primary. */
bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe == joiner_primary_pipe(crtc_state);
}

/* Number of pipes driven together for this crtc (>= 1). */
int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(intel_crtc_joined_pipe_mask(crtc_state));
}

/* Mask of the crtc's own pipe plus all joined pipes. */
u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return BIT(crtc->pipe) | crtc_state->joiner_pipes;
}

/*
 * Resolve the primary crtc for a (possibly joined) crtc state: the
 * joiner primary when this state is a secondary, otherwise the crtc
 * itself.
 */
struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (intel_crtc_is_joiner_secondary(crtc_state))
		return intel_crtc_for_pipe(display, joiner_primary_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

/*
 * Wait for the pipe to fully stop after disabling it. On gen4+ we can
 * poll the transcoder state bit; older hardware requires watching the
 * scanline stop moving.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
					    TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/*
 * Assert that the given transcoder's enable state matches @state.
 * If the transcoder's power domain is off, the transcoder is treated
 * as disabled (registers can't be read).
 */
void assert_transcoder(struct intel_display *display,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (display->platform.i830)
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(display, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(display,
					TRANSCONF(display, cpu_transcoder));
		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(display, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "transcoder %s assertion failure (expected %s, current %s)\n",
				 transcoder_name(cpu_transcoder), str_on_off(state),
				 str_on_off(cur_state));
}
/* Assert that a plane's hardware enable state matches @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
	struct intel_display *display = to_intel_display(plane->base.dev);
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "%s assertion failure (expected %s, current %s)\n",
				 plane->base.name, str_on_off(state),
				 str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

/* Assert that every plane on @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

/*
 * Enable the transcoder/pipe for @new_crtc_state, applying the
 * platform workarounds that must be in place before TRANSCONF_ENABLE
 * is set, and optionally waiting for the pipe to actually start.
 */
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(display->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(display);
		else
			assert_pll_enabled(display, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(display,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(display,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(display, PIPE_ARB_CTL(display, pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	if (DISPLAY_VER(dev_priv) >= 14) {
		u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
		u32 set = 0;

		/* NOTE(review): jitter WA applies to display 14 only — confirm */
		if (DISPLAY_VER(display) == 14)
			set |= DP_FEC_BS_JITTER_WA;

		intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
			     clear, set);
	}

	val = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(display) >= 13 &&
	    new_crtc_state->dsc.compression_enable) {
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
		val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
				      TRANSCONF_PIXEL_COUNT_SCALING_X4);
	}

	intel_de_write(display, TRANSCONF(display, cpu_transcoder),
		       val | TRANSCONF_ENABLE);
	intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

/*
 * Disable the transcoder/pipe for @old_crtc_state and wait for it to
 * actually stop (unless it must stay enabled, as on i830).
 */
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(old_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~TRANSCONF_ENABLE;

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(dev_priv) >= 13 &&
	    old_crtc_state->dsc.compression_enable)
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;

	intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	/* Only wait when the enable bit was actually cleared (not i830). */
	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

/*
 * Maximum framebuffer stride supported for the given format/modifier,
 * taken from the primary plane of the first crtc. Returns 0 if the
 * device has no display or no crtcs.
 */
u32 intel_plane_fb_max_stride(struct drm_device *drm,
			      u32 pixel_format, u64 modifier)
{
	struct intel_display *display = to_intel_display(drm);
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(display))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all,
	 * if in case pipe A is disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_first_crtc(display);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}
/*
 * Mark a plane (in)visible in both the plane state and the crtc's
 * uapi plane_mask.
 */
void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

/* Rebuild enabled_planes/active_planes from the uapi plane_mask. */
void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

/*
 * Disable a plane outside of an atomic commit (e.g. during initial
 * hardware takeover), updating the software state to match and
 * honoring the ordering/vblank-wait requirements of older hardware.
 */
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct intel_display *display = to_intel_display(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	/* Clear the plane's bookkeeping in the crtc state. */
	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_plane_initial_vblank_wait(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_plane_initial_vblank_wait(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, false);

	intel_plane_disable_arm(NULL, plane, crtc_state);
	intel_plane_initial_vblank_wait(crtc);
}
/*
 * Y offset (in lines) of the plane's first color plane relative to
 * its fence, derived from the aligned surface offset.
 */
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}

/*
 * Program the per-pipe chicken register with the workaround bits
 * required on ICL and newer platforms.
 */
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if ((DISPLAY_VER(dev_priv) >= 13) && (DISPLAY_VER(dev_priv) < 30))
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2(dev_priv))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

/*
 * Check whether any crtc still has an atomic commit whose cleanup
 * (framebuffer unpin) hasn't completed. If one is found, wait for
 * the next vblank on that crtc and return true so the caller can
 * retry; returns false when everything is clean.
 */
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}
/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *primary_crtc;
	int num_encoders = 0;
	int i;

	primary_crtc = intel_primary_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &primary_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	/* Exactly one encoder is expected; warn otherwise. */
	drm_WARN(state->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(primary_crtc->pipe));

	return encoder;
}

/* Enable and program the ILK-style panel fitter for @crtc_state. */
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
			  PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
			  PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
}

/* Turn off the legacy overlay, if present, when the crtc is disabled. */
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

/* True if WA #0827 must be applied (NV12 planes active on Gen9). */
static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(dev_priv) == 9)
		return true;

	return false;
}

/* True if Wa_2006604312 must be applied (scaler in use on display 11). */
static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

/* True if Wa_1604331009 must be applied (HDR mode + cursor on display 11). */
static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

/*
 * Async flip + VT-d workaround: restrict the plane stretch max while
 * async flips are enabled (Gen9, and undocumented on HSW/BDW).
 */
static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N stretch max must be programmed to 11b (x1)
		 * when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}
/*
 * True if the async-flip VT-d workaround applies: async flip requested
 * with VT-d active on Gen9/BDW/HSW.
 */
static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}

/* Call the audio_enable hook of every encoder feeding @crtc. */
static void intel_encoders_audio_enable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_enable)
			encoder->audio_enable(encoder, crtc_state, conn_state);
	}
}

/* Call the audio_disable hook of every encoder that was feeding @crtc. */
static void intel_encoders_audio_disable(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_disable)
			encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
	}
}

/*
 * A feature is "enabling" when it is on in the new state and was
 * either off before or the crtc undergoes a full modeset; mirror
 * logic for "disabling". #undef'd after the last user below.
 */
#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))

/* True if planes are being turned on for an active crtc. */
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

/* True if planes are being turned off for a previously active crtc. */
static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

/* True if any VRR timing parameter differs between the two states. */
static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline ||
		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
}

/* True if any CMRR (content matched refresh rate) parameter differs. */
static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
				const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->cmrr.cmrr_m != new_crtc_state->cmrr.cmrr_m ||
		old_crtc_state->cmrr.cmrr_n != new_crtc_state->cmrr.cmrr_n;
}

/*
 * VRR must be (re)enabled when it is newly switched on, or when it
 * stays on but its link m/n, LRR or timing parameters change.
 */
static bool intel_crtc_vrr_enabling(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}
intel_crtc_state *new_crtc_state = 1017 intel_atomic_get_new_crtc_state(state, crtc); 1018 1019 if (!new_crtc_state->hw.active) 1020 return false; 1021 1022 return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) || 1023 (new_crtc_state->vrr.enable && 1024 (new_crtc_state->update_m_n || new_crtc_state->update_lrr || 1025 vrr_params_changed(old_crtc_state, new_crtc_state))); 1026 } 1027 1028 bool intel_crtc_vrr_disabling(struct intel_atomic_state *state, 1029 struct intel_crtc *crtc) 1030 { 1031 const struct intel_crtc_state *old_crtc_state = 1032 intel_atomic_get_old_crtc_state(state, crtc); 1033 const struct intel_crtc_state *new_crtc_state = 1034 intel_atomic_get_new_crtc_state(state, crtc); 1035 1036 if (!old_crtc_state->hw.active) 1037 return false; 1038 1039 return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) || 1040 (old_crtc_state->vrr.enable && 1041 (new_crtc_state->update_m_n || new_crtc_state->update_lrr || 1042 vrr_params_changed(old_crtc_state, new_crtc_state))); 1043 } 1044 1045 static bool audio_enabling(const struct intel_crtc_state *old_crtc_state, 1046 const struct intel_crtc_state *new_crtc_state) 1047 { 1048 if (!new_crtc_state->hw.active) 1049 return false; 1050 1051 return is_enabling(has_audio, old_crtc_state, new_crtc_state) || 1052 (new_crtc_state->has_audio && 1053 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0); 1054 } 1055 1056 static bool audio_disabling(const struct intel_crtc_state *old_crtc_state, 1057 const struct intel_crtc_state *new_crtc_state) 1058 { 1059 if (!old_crtc_state->hw.active) 1060 return false; 1061 1062 return is_disabling(has_audio, old_crtc_state, new_crtc_state) || 1063 (old_crtc_state->has_audio && 1064 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0); 1065 } 1066 1067 #undef is_disabling 1068 #undef is_enabling 1069 1070 static void intel_post_plane_update(struct intel_atomic_state *state, 1071 struct intel_crtc *crtc) 1072 { 1073 struct 
drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_post_plane_update(state, crtc);

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	/* Tear down the workarounds that are no longer needed in the new state. */
	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);

	if (audio_enabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_enable(state, crtc);
}

/*
 * Post-plane-update steps that must run only after the hardware state
 * readout/verification has been done for this commit.
 */
static void intel_post_plane_update_after_readout(struct intel_atomic_state *state,
						  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
	hsw_ips_post_update(state, crtc);

	/*
	 * Activate DRRS after state readout to avoid
	 * dp_m_n vs. dp_m2_n2 confusion on BDW+.
	 */
	intel_drrs_activate(new_crtc_state);
}

/* Enable flip-done interrupt handling for all updated planes on @crtc. */
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

/* Disable flip-done interrupt handling for all updated planes on @crtc. */
static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}

/*
 * WA: on planes flagged with need_async_flip_toggle_wa, explicitly arm a
 * sync flip (async flip bit cleared, old state otherwise preserved) for
 * every plane whose async flip is being turned off, then wait a vblank.
 */
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
				       ~new_crtc_state->async_flip_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_toggle_wa &&
		    plane->pipe == crtc->pipe &&
		    disable_async_flip_planes &
BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			intel_plane_async_flip(NULL, plane,
					       old_crtc_state, old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}

/*
 * Per-crtc preparation that must happen before any plane registers are
 * written for this commit: deactivate features that cannot survive the
 * update (VRR, audio, DRRS, PSR) and arm the workarounds required by the
 * new state. The order of the steps below matters.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (intel_crtc_vrr_disabling(state, crtc)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	if (audio_disabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_disable(state, crtc);

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (!HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(display, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

/*
 * Disarm every updated plane on @crtc and flush frontbuffer tracking for
 * the planes that were visible in the old state.
 */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(NULL, plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}

static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (i915->display.dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			/* fastset: carry the old DPLL assignment over unchanged */
			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}
}

/* Call the .pre_pll_enable() hook of every encoder feeding @crtc in the new state. */
static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}

/* Call the .pre_enable() hook of every encoder feeding @crtc in the new state. */
static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}

/*
 * Call the .enable() hook of every encoder feeding @crtc in the new state,
 * and notify the ACPI OpRegion that each encoder is now active.
 */
static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

/*
 * Notify the ACPI OpRegion that each encoder is going inactive, then call
 * the .disable() hook of every encoder that fed @crtc in the old state.
 */
static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
	}
}

/* Call the .post_disable() hook of every encoder that fed @crtc in the old state. */
static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}

/* Call the .post_pll_disable() hook of every encoder that fed @crtc in the old state. */
static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}

/* Call the .update_pipe() hook (fastset path) of every encoder feeding @crtc. */
static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
	}
}

/*
 * Program the CPU transcoder M1/N1 (and M2/N2 for DP), the transcoder
 * timings and pipeconf for an ILK-style pipe.
 */
static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}

/*
 * Full enable sequence for an ILK-era (PCH) crtc. The ordering of the
 * steps below follows the hardware enable sequence and must not be
 * rearranged.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
	intel_set_pch_fifo_underrun_reporting(display, pipe, false);

	ilk_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		ilk_pch_pre_enable(state, crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_modeset(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		intel_wait_for_pipe_scanline_moving(crtc);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);
	}
	intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
	intel_set_pch_fifo_underrun_reporting(display, pipe, true);
}

/* Display WA #1180: WaDisableScalarClockGating: glk */
static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled;
}

/* Toggle the DPF/DPFR clock gating disable bits for the WA above. */
static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;

	intel_de_rmw(i915, CLKGATE_DIS_PSL(crtc->pipe),
		     mask, enable ? mask : 0);
}

/* Program the pipe/IPS linetime watermarks. */
static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
		       HSW_LINETIME(crtc_state->linetime) |
		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
}

/* Program the frame start delay (register field is delay - 1). */
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	intel_de_rmw(display, CHICKEN_TRANS(display, crtc_state->cpu_transcoder),
		     HSW_FRAME_START_DELAY_MASK,
		     HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}

/*
 * Program the CPU transcoder (M/N, timings, VRR timings, pixel multiplier,
 * frame start delay and transconf) for a HSW+ pipe.
 */
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if
(crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);
	if (HAS_VRR(dev_priv))
		intel_vrr_set_transcoder_timings(crtc_state);

	/* pixel multiplier has no register on the eDP transcoder */
	if (cpu_transcoder != TRANSCODER_EDP)
		intel_de_write(dev_priv, TRANS_MULT(dev_priv, cpu_transcoder),
			       crtc_state->pixel_multiplier - 1);

	hsw_set_frame_start_delay(crtc_state);

	hsw_set_transconf(crtc_state);
}

/*
 * Full enable sequence for a HSW+ crtc. Joined pipes are enabled together:
 * each per-pipe step iterates over all pipes taking part in this modeset
 * enable. The step ordering is hardware-mandated; do not rearrange.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	struct intel_crtc *pipe_crtc;
	int i;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i)
		intel_dmc_enable_pipe(display, pipe_crtc->pipe);

	intel_encoders_pre_pll_enable(state, crtc);

	for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		if (pipe_crtc_state->shared_dpll)
			intel_enable_shared_dpll(pipe_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		intel_dsc_enable(pipe_crtc_state);

		if (HAS_UNCOMPRESSED_JOINER(dev_priv))
			intel_uncompressed_joiner_enable(pipe_crtc_state);

		intel_set_pipe_src_size(pipe_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipe_misc(NULL, pipe_crtc_state);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		hsw_configure_cpu_transcoder(new_crtc_state);

	for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		pipe_crtc->active = true;

		if (glk_need_scaler_clock_gating_wa(pipe_crtc_state))
			glk_pipe_scaler_clock_gating_wa(pipe_crtc, true);

		if (DISPLAY_VER(dev_priv) >= 9)
			skl_pfit_enable(pipe_crtc_state);
		else
			ilk_pfit_enable(pipe_crtc_state);

		/*
		 * On ILK+ LUT must be loaded before the pipe is running but with
		 * clocks enabled
		 */
		intel_color_modeset(pipe_crtc_state);

		hsw_set_linetime_wm(pipe_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 11)
			icl_set_pipe_chicken(pipe_crtc_state);

		intel_initial_watermarks(state, pipe_crtc);
	}

	intel_encoders_enable(state, crtc);

	for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);
		enum pipe hsw_workaround_pipe;

		if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) {
			intel_crtc_wait_for_next_vblank(pipe_crtc);
			glk_pipe_scaler_clock_gating_wa(pipe_crtc, false);
		}

		/*
		 * If we change the relative order between pipe/planes
		 * enabling, we need to change the workaround.
		 */
		hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe;
		if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
			struct intel_crtc *wa_crtc =
				intel_crtc_for_pipe(display, hsw_workaround_pipe);

			intel_crtc_wait_for_next_vblank(wa_crtc);
			intel_crtc_wait_for_next_vblank(wa_crtc);
		}
	}
}

void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
}

/*
 * Full disable sequence for an ILK-era (PCH) crtc; mirrors ilk_crtc_enable()
 * in reverse order.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
	intel_set_pch_fifo_underrun_reporting(display, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_disable(state, crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_post_disable(state, crtc);

	intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
	intel_set_pch_fifo_underrun_reporting(display, pipe, true);

	intel_disable_shared_dpll(old_crtc_state);
}

/*
 * Full disable sequence for a HSW+ crtc. Joined pipes are disabled together:
 * each per-pipe step iterates over all pipes taking part in this modeset
 * disable.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc *pipe_crtc;
	int i;

	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);

	for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
		const struct intel_crtc_state *old_pipe_crtc_state =
			intel_atomic_get_old_crtc_state(state, pipe_crtc);

		intel_disable_shared_dpll(old_pipe_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i)
		intel_dmc_disable_pipe(display, pipe_crtc->pipe);
}

/* Program the GMCH panel fitter ratios, control and border color. */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(display->drm,
		    intel_de_read(display, PFIT_CONTROL(display)) & PFIT_ENABLE);
	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);

	intel_de_write(display, PFIT_PGM_RATIOS(display),
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(display, PFIT_CONTROL(display),
		       crtc_state->gmch_pfit.control);

	/*
	 * Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging.
	 */
	intel_de_write(display, BCLRPAT(display, crtc->pipe), 0);
}

/* Prefer intel_encoder_is_combo() */
bool intel_phy_is_combo(struct intel_display *display, enum phy phy)
{
	if (phy == PHY_NONE)
		return false;
	else if (display->platform.alderlake_s)
		return phy <= PHY_E;
	else if (display->platform.dg1 || display->platform.rocketlake)
		return phy <= PHY_D;
	else if (display->platform.jasperlake || display->platform.elkhartlake)
		return phy <= PHY_C;
	else if (display->platform.alderlake_p || IS_DISPLAY_VER(display, 11, 12))
		return phy <= PHY_B;
	else
		/*
		 * DG2 outputs labelled as "combo PHY" in the bspec use
		 * SNPS PHYs with completely different programming,
		 * hence we always return false here.
		 */
		return false;
}

/* Prefer intel_encoder_is_tc() */
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
	/*
	 * Discrete GPU phy's are not attached to FIA's to support TC
	 * subsystem Legacy or non-legacy, and only support native DP/HDMI
	 */
	if (IS_DGFX(dev_priv))
		return false;

	if (DISPLAY_VER(dev_priv) >= 13)
		return phy >= PHY_F && phy <= PHY_I;
	else if (IS_TIGERLAKE(dev_priv))
		return phy >= PHY_D && phy <= PHY_I;
	else if (IS_ICELAKE(dev_priv))
		return phy >= PHY_C && phy <= PHY_F;

	return false;
}

/* Prefer intel_encoder_is_snps() */
bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
{
	/*
	 * For DG2, and for DG2 only, all four "combo" ports and the TC1 port
	 * (PHY E) use Synopsis PHYs. See intel_phy_is_tc().
	 */
	return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
}

/* Prefer intel_encoder_to_phy() */
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
	if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
		return PHY_D + port - PORT_D_XELPD;
	else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
		return PHY_F + port - PORT_TC1;
	else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
		return PHY_B + port - PORT_TC1;
	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
		return PHY_C + port - PORT_TC1;
	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
		 port == PORT_D)
		return PHY_A;

	return PHY_A + port - PORT_A;
}

/* Prefer intel_encoder_to_tc() */
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
		return TC_PORT_NONE;

	if (DISPLAY_VER(dev_priv) >= 12)
		return TC_PORT_1 + port - PORT_TC1;
	else
		return TC_PORT_1 + port - PORT_C;
}

enum phy intel_encoder_to_phy(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

	return intel_port_to_phy(i915, encoder->port);
}

bool intel_encoder_is_combo(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	return intel_phy_is_combo(display, intel_encoder_to_phy(encoder));
}

bool intel_encoder_is_snps(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

	return intel_phy_is_snps(i915, intel_encoder_to_phy(encoder));
}

bool intel_encoder_is_tc(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

	return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder));
}

enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

	return intel_port_to_tc(i915, encoder->port);
}

/*
 * Return the AUX power domain for @dig_port, using the TBT AUX domain
 * when the Type-C port is currently in TBT-alt mode and the legacy AUX
 * domain otherwise.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);

	if (intel_tc_port_in_tbt_alt_mode(dig_port))
		return intel_display_power_tbt_aux_domain(display, dig_port->aux_ch);

	return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
}

/*
 * Compute in @mask the full set of power domains needed by @crtc_state:
 * empty for an inactive crtc, otherwise pipe + transcoder plus the
 * pfit/encoder/audio/PLL/DSC domains as applicable.
 */
static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
				   struct intel_power_domain_mask *mask)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;

	bitmap_zero(mask->bits, POWER_DOMAIN_NUM);

	if (!crtc_state->hw.active)
		return;

	set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
	set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);

	/* Each enabled encoder contributes its own power domain. */
	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		set_bit(intel_encoder->power_domain, mask->bits);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);

	if (crtc_state->shared_dpll)
		set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);

	if (crtc_state->dsc.compression_enable)
		set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}

/*
 * Acquire references for all power domains @crtc_state newly needs and
 * report the no-longer-needed ones in @old_domains; the caller must drop
 * those later via intel_modeset_put_crtc_power_domains().
 */
void
intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
				     struct intel_power_domain_mask *old_domains)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum intel_display_power_domain domain;
	struct intel_power_domain_mask domains, new_domains;

	get_crtc_power_domains(crtc_state, &domains);

	/* new_domains: needed but not yet held */
	bitmap_andnot(new_domains.bits,
		      domains.bits,
		      crtc->enabled_power_domains.mask.bits,
		      POWER_DOMAIN_NUM);
	/* old_domains: held but no longer needed */
	bitmap_andnot(old_domains->bits,
		      crtc->enabled_power_domains.mask.bits,
		      domains.bits,
		      POWER_DOMAIN_NUM);

	for_each_power_domain(domain, &new_domains)
		intel_display_power_get_in_set(display,
					       &crtc->enabled_power_domains,
					       domain);
}

/* Drop the power domain references reported earlier in @domains. */
void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
					  struct intel_power_domain_mask *domains)
{
	struct intel_display *display = to_intel_display(crtc);

	intel_display_power_put_mask_in_set(display,
					    &crtc->enabled_power_domains,
					    domains);
}

/*
 * Program the GMCH cpu transcoder: DP M/N values (DP outputs only),
 * transcoder timings, and pipeconf.
 */
static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	i9xx_set_pipeconf(crtc_state);
}

static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);

	/* CHV pipe B: use legacy blending and a black canvas */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(dev_priv, pipe),
			       CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(dev_priv, pipe), 0);
	}

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(display, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv))
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_modeset(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}

static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(display, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_modeset(new_crtc_state);

	if (!intel_initial_watermarks(state, crtc))
		intel_update_watermarks(dev_priv);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);
}

/* Disable the GMCH panel fitter; pipe/transcoder must already be off. */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(old_crtc_state);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_transcoder_disabled(display, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(display->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(display, PFIT_CONTROL(display)));
	intel_de_write(display, PFIT_CONTROL(display), 0);
}

static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI outputs keep their PLL; everything else drops it here. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(display, pipe, false);

	if (!dev_priv->display.funcs.wm->initial_watermarks)
		intel_update_watermarks(dev_priv);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(display, pipe);
}

void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return HAS_DOUBLE_WIDE(dev_priv) &&
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}

/*
 * Pixel rate through the pipe for ILK+, adjusted for the PCH panel
 * fitter scaling ratio when the pfit is enabled.
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
	struct drm_rect src;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (!crtc_state->pch_pfit.enabled)
		return pixel_rate;

	/* pfit source is the full pipe source size, in 16.16 fixed point */
	drm_rect_init(&src, 0, 0,
		      drm_rect_width(&crtc_state->pipe_src) << 16,
		      drm_rect_height(&crtc_state->pipe_src) << 16);

	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
				   pixel_rate);
}

/*
 * Fill the normal (user-visible) timing fields of @mode from the crtc_*
 * (hardware) timings of @timings, and regenerate the mode name.
 */
static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
					 const struct drm_display_mode *timings)
{
	mode->hdisplay = timings->crtc_hdisplay;
	mode->htotal = timings->crtc_htotal;
	mode->hsync_start = timings->crtc_hsync_start;
	mode->hsync_end = timings->crtc_hsync_end;

	mode->vdisplay = timings->crtc_vdisplay;
	mode->vtotal = timings->crtc_vtotal;
	mode->vsync_start = timings->crtc_vsync_start;
	mode->vsync_end = timings->crtc_vsync_end;

	mode->flags = timings->flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = timings->crtc_clock;

	drm_mode_set_name(mode);
}

static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (HAS_GMCH(dev_priv))
		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
		crtc_state->pixel_rate =
			crtc_state->hw.pipe_mode.crtc_clock;
	else
		crtc_state->pixel_rate =
			ilk_pipe_pixel_rate(crtc_state);
}

/*
 * Scale the horizontal timings down to what a single pipe of a joiner
 * configuration handles. No-op when only one pipe is used.
 */
static void intel_joiner_adjust_timings(const struct intel_crtc_state *crtc_state,
					struct drm_display_mode *mode)
{
	int num_pipes = intel_crtc_num_joined_pipes(crtc_state);

	if (num_pipes == 1)
		return;

	mode->crtc_clock /= num_pipes;
	mode->crtc_hdisplay /= num_pipes;
	mode->crtc_hblank_start /= num_pipes;
	mode->crtc_hblank_end /= num_pipes;
	mode->crtc_hsync_start /= num_pipes;
	mode->crtc_hsync_end /= num_pipes;
	mode->crtc_htotal /= num_pipes;
}

/* Expand eDP MSO per-segment timings to the full-mode equivalent. */
static void
intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
			      struct drm_display_mode *mode)
{
	int overlap = crtc_state->splitter.pixel_overlap;
	int n = crtc_state->splitter.link_count;

	if (!crtc_state->splitter.enable)
		return;

	/*
	 * eDP MSO uses segment timings from EDID for transcoder
	 * timings, but full mode for everything else.
	 *
	 * h_full = (h_segment - pixel_overlap) * link_count
	 */
	mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
	mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
	mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
	mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
	mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
	mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
	mode->crtc_clock *= n;
}

/*
 * Derive hw.mode, hw.pipe_mode and the adjusted_mode normal timings
 * from the transcoder timings read out into hw.adjusted_mode.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/*
	 * We want the full numbers in adjusted_mode normal timings,
	 * adjusted_mode crtc timings are left with the raw transcoder
	 * timings.
	 */
	intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);

	/* Populate the "user" mode with full numbers */
	drm_mode_copy(mode, pipe_mode);
	intel_mode_from_crtc_timings(mode, mode);
	mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
		intel_crtc_num_joined_pipes(crtc_state);
	mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);

	/* Derive per-pipe timings in case joiner is used */
	intel_joiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	intel_crtc_compute_pixel_rate(crtc_state);
}

/* Read out the encoder's config, then derive the dependent crtc state. */
void intel_encoder_get_config(struct intel_encoder *encoder,
			      struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}

/* Split pipe_src horizontally between the joined pipes. */
static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
	int num_pipes = intel_crtc_num_joined_pipes(crtc_state);
	int width, height;

	if (num_pipes == 1)
		return;

	width = drm_rect_width(&crtc_state->pipe_src);
	height = drm_rect_height(&crtc_state->pipe_src);

	drm_rect_init(&crtc_state->pipe_src, 0, 0,
		      width / num_pipes, height);
}

/*
 * Compute the per-pipe source size and reject odd widths in the
 * configurations that cannot handle them.
 */
static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	intel_joiner_compute_pipe_src(crtc_state);

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (drm_rect_width(&crtc_state->pipe_src) & 1) {
		if (crtc_state->double_wide) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
				    crtc->base.base.id, crtc->base.name);
			return -EINVAL;
		}

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(i915)) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
				    crtc->base.base.id, crtc->base.name);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Compute hw.pipe_mode from the adjusted mode, decide on double wide
 * mode (pre-gen4), and validate the pixel clock against the dotclock/
 * cdclk limits.
 */
static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	int clock_limit = i915->display.cdclk.max_dotclk_freq;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/* Derive per-pipe timings in case joiner is used */
	intel_joiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(i915) < 4) {
		clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = i915->display.cdclk.max_dotclk_freq;
			crtc_state->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    crtc->base.base.id, crtc->base.name,
			    pipe_mode->crtc_clock, clock_limit,
			    str_yes_no(crtc_state->double_wide));
		return -EINVAL;
	}

	return 0;
}

static bool intel_crtc_needs_wa_14015401596(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	return intel_vrr_possible(crtc_state) && crtc_state->has_psr &&
		IS_DISPLAY_VER(display, 13, 14);
}

/* Extra vblank delay required by @crtc_state, 0 when none is needed. */
static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	int vblank_delay = 0;

	if (!HAS_DSB(display))
		return 0;

	/* Wa_14015401596 */
	if (intel_crtc_needs_wa_14015401596(crtc_state))
		vblank_delay = max(vblank_delay, 1);

	return vblank_delay;
}

static int intel_crtc_compute_vblank_delay(struct intel_atomic_state *state,
					   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int vblank_delay, max_vblank_delay;

	vblank_delay = intel_crtc_vblank_delay(crtc_state);
	max_vblank_delay = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start - 1;

	if (vblank_delay > max_vblank_delay) {
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] vblank delay (%d) exceeds max (%d)\n",
			    crtc->base.base.id, crtc->base.name,
			    vblank_delay, max_vblank_delay);
		return -EINVAL;
	}

	/* Apply the delay by pushing the vblank start out. */
	adjusted_mode->crtc_vblank_start += vblank_delay;

	return 0;
}

/*
 * Compute the crtc-level parts of the new state: vblank delay, clocks,
 * pipe source size, pipe mode, pixel rate and (PCH outputs) FDI config.
 * Returns 0 or a negative error code.
 */
static int intel_crtc_compute_config(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	ret = intel_crtc_compute_vblank_delay(state, crtc);
	if (ret)
		return ret;

	ret = intel_dpll_crtc_compute_clock(state, crtc);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_src(crtc_state);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_mode(crtc_state);
	if (ret)
		return ret;

	intel_crtc_compute_pixel_rate(crtc_state);

	if (crtc_state->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, crtc_state);

	return 0;
}

/* Halve M and N together until both fit in the register field. */
static void
intel_reduce_m_n_ratio(u32 *num, u32 *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

/*
 * Compute an M/N register pair for the ratio m/n, either with a fixed
 * N (@constant_n != 0) or an N rounded up to a power of two.
 */
static void compute_m_n(u32 *ret_m, u32 *ret_n,
			u32 m, u32 n, u32 constant_n)
{
	if (constant_n)
		*ret_n = constant_n;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	/* scale M by the chosen N: ret_m = m * ret_n / n */
	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

void
intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
		       int pixel_clock, int link_clock,
		       int bw_overhead,
		       struct intel_link_m_n *m_n)
{
	u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
	u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
						  bw_overhead);
	u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes);

	/*
	 * Windows/BIOS uses fixed M/N values always. Follow suit.
	 *
	 * Also several DP dongles in particular seem to be fussy
	 * about too large link M/N values. Presumably the 20bit
	 * value used by Windows/BIOS is acceptable to everyone.
	 */
	m_n->tu = 64;
	compute_m_n(&m_n->data_m, &m_n->data_n,
		    data_m, data_n,
		    0x8000000);

	compute_m_n(&m_n->link_m, &m_n->link_n,
		    pixel_clock, link_symbol_clock,
		    0x80000);
}

void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    str_enabled_disabled(bios_lvds_use_ssc),
				    str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
			dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}

void intel_zero_m_n(struct intel_link_m_n *m_n)
{
	/* corresponds to 0 register value */
	memset(m_n, 0, sizeof(*m_n));
	m_n->tu = 1;
}

void intel_set_m_n(struct drm_i915_private *i915,
		   const struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
	intel_de_write(i915, data_n_reg, m_n->data_n);
	intel_de_write(i915, link_m_reg, m_n->link_m);
	/*
	 * On BDW+ writing LINK_N arms the double buffered update
	 * of all the M/N registers, so it must be written last.
	 */
	intel_de_write(i915, link_n_reg, m_n->link_n);
}

bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
				    enum transcoder transcoder)
{
	if (IS_HASWELL(dev_priv))
		return transcoder == TRANSCODER_EDP;

	return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
}

void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* ILK+ uses per-transcoder registers, G4X and earlier per-pipe ones */
	if (DISPLAY_VER(dev_priv) >= 5)
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M1(dev_priv, transcoder),
			      PIPE_DATA_N1(dev_priv, transcoder),
			      PIPE_LINK_M1(dev_priv, transcoder),
			      PIPE_LINK_N1(dev_priv, transcoder));
	else
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}

void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* not all transcoders have a second set of M/N registers */
	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
		return;

	intel_set_m_n(dev_priv, m_n,
		      PIPE_DATA_M2(dev_priv, transcoder),
		      PIPE_DATA_N2(dev_priv, transcoder),
		      PIPE_LINK_M2(dev_priv, transcoder),
		      PIPE_LINK_N2(dev_priv, transcoder));
}

static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
	int
vsyncshift = 0;

	drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vdisplay = adjusted_mode->crtc_vdisplay;
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_start = adjusted_mode->crtc_vblank_start;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/*
	 * VBLANK_START no longer works on ADL+, instead we must use
	 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
	 */
	if (DISPLAY_VER(dev_priv) >= 13) {
		intel_de_write(dev_priv,
			       TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder),
			       crtc_vblank_start - crtc_vdisplay);

		/*
		 * VBLANK_START not used by hw, just clear it
		 * to make it stand out in register dumps.
		 */
		crtc_vblank_start = 1;
	}

	if (DISPLAY_VER(dev_priv) >= 4)
		intel_de_write(dev_priv,
			       TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder),
			       vsyncshift);

	/* Horizontal timings (register fields are all <value> - 1). */
	intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder),
		       HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
		       HTOTAL(adjusted_mode->crtc_htotal - 1));
	intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder),
		       HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
		       HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
	intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder),
		       HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
		       HSYNC_END(adjusted_mode->crtc_hsync_end - 1));

	/* Vertical timings, using the locally adjusted interlace values. */
	intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));
	intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
		       VBLANK_START(crtc_vblank_start - 1) |
		       VBLANK_END(crtc_vblank_end - 1));
	intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder),
		       VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
		       VSYNC_END(adjusted_mode->crtc_vsync_end - 1));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, pipe),
			       VACTIVE(crtc_vdisplay - 1) |
			       VTOTAL(crtc_vtotal - 1));
}

/*
 * Reprogram only the vertical timing registers (context latency,
 * VBLANK, VTOTAL) that intel_set_transcoder_timings() also writes,
 * leaving the horizontal timings untouched.
 */
static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;

	drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));

	crtc_vdisplay = adjusted_mode->crtc_vdisplay;
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_start = adjusted_mode->crtc_vblank_start;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;
	}

	if (DISPLAY_VER(dev_priv) >= 13) {
		intel_de_write(dev_priv,
			       TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder),
			       crtc_vblank_start - crtc_vdisplay);

		/*
		 * VBLANK_START not used by hw, just clear it
		 * to make it stand out in register dumps.
		 */
		crtc_vblank_start = 1;
	}

	/*
	 * The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode.
	 * But let's write it anyway to keep the state checker happy.
	 */
	intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
		       VBLANK_START(crtc_vblank_start - 1) |
		       VBLANK_END(crtc_vblank_end - 1));
	/*
	 * The double buffer latch point for TRANS_VTOTAL
	 * is the transcoder's undelayed vblank.
	 */
	intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));
}

static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int width = drm_rect_width(&crtc_state->pipe_src);
	int height = drm_rect_height(&crtc_state->pipe_src);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	intel_de_write(dev_priv, PIPESRC(dev_priv, pipe),
		       PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}

/* Read back whether the transcoder is currently in an interlaced mode. */
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* gen2 never reports interlace */
	if (DISPLAY_VER(dev_priv) == 2)
		return false;

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv,
				     TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
	else
		return intel_de_read(dev_priv,
				     TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
}

/*
 * Read the transcoder timing registers back into
 * @pipe_config->hw.adjusted_mode crtc_* fields (registers store
 * <value> - 1, hence the + 1 on every field).
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	u32 tmp;

	tmp = intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder));
	adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv,
				    TRANS_HBLANK(dev_priv, cpu_transcoder));
		adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
	}

	tmp = intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder));
	adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;

	tmp = intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder));
	adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;

	/* FIXME TGL+ DSI transcoders have this! */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv,
				    TRANS_VBLANK(dev_priv, cpu_transcoder));
		adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
	}
	tmp = intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder));
	adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;

	if (intel_pipe_is_interlaced(pipe_config)) {
		adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
		adjusted_mode->crtc_vtotal += 1;
		adjusted_mode->crtc_vblank_end += 1;
	}

	/* ADL+: vblank start comes from TRANS_SET_CONTEXT_LATENCY instead */
	if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
		adjusted_mode->crtc_vblank_start =
			adjusted_mode->crtc_vdisplay +
			intel_de_read(dev_priv,
				      TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder));
}

static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2945 int num_pipes = intel_crtc_num_joined_pipes(crtc_state); 2946 enum pipe primary_pipe, pipe = crtc->pipe; 2947 int width; 2948 2949 if (num_pipes == 1) 2950 return; 2951 2952 primary_pipe = joiner_primary_pipe(crtc_state); 2953 width = drm_rect_width(&crtc_state->pipe_src); 2954 2955 drm_rect_translate_to(&crtc_state->pipe_src, 2956 (pipe - primary_pipe) * width, 0); 2957 } 2958 2959 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 2960 struct intel_crtc_state *pipe_config) 2961 { 2962 struct drm_device *dev = crtc->base.dev; 2963 struct drm_i915_private *dev_priv = to_i915(dev); 2964 u32 tmp; 2965 2966 tmp = intel_de_read(dev_priv, PIPESRC(dev_priv, crtc->pipe)); 2967 2968 drm_rect_init(&pipe_config->pipe_src, 0, 0, 2969 REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1, 2970 REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1); 2971 2972 intel_joiner_adjust_pipe_src(pipe_config); 2973 } 2974 2975 void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 2976 { 2977 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2978 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2979 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2980 u32 val = 0; 2981 2982 /* 2983 * - We keep both pipes enabled on 830 2984 * - During modeset the pipe is still disabled and must remain so 2985 * - During fastset the pipe is already enabled and must remain so 2986 */ 2987 if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state)) 2988 val |= TRANSCONF_ENABLE; 2989 2990 if (crtc_state->double_wide) 2991 val |= TRANSCONF_DOUBLE_WIDE; 2992 2993 /* only g4x and later have fancy bpc/dither controls */ 2994 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2995 IS_CHERRYVIEW(dev_priv)) { 2996 /* Bspec claims that we can't use dithering for 30bpp pipes. 
*/ 2997 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 2998 val |= TRANSCONF_DITHER_EN | 2999 TRANSCONF_DITHER_TYPE_SP; 3000 3001 switch (crtc_state->pipe_bpp) { 3002 default: 3003 /* Case prevented by intel_choose_pipe_bpp_dither. */ 3004 MISSING_CASE(crtc_state->pipe_bpp); 3005 fallthrough; 3006 case 18: 3007 val |= TRANSCONF_BPC_6; 3008 break; 3009 case 24: 3010 val |= TRANSCONF_BPC_8; 3011 break; 3012 case 30: 3013 val |= TRANSCONF_BPC_10; 3014 break; 3015 } 3016 } 3017 3018 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 3019 if (DISPLAY_VER(dev_priv) < 4 || 3020 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3021 val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION; 3022 else 3023 val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT; 3024 } else { 3025 val |= TRANSCONF_INTERLACE_PROGRESSIVE; 3026 } 3027 3028 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 3029 crtc_state->limited_color_range) 3030 val |= TRANSCONF_COLOR_RANGE_SELECT; 3031 3032 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 3033 3034 if (crtc_state->wgc_enable) 3035 val |= TRANSCONF_WGC_ENABLE; 3036 3037 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3038 3039 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val); 3040 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder)); 3041 } 3042 3043 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 3044 { 3045 if (IS_I830(dev_priv)) 3046 return false; 3047 3048 return DISPLAY_VER(dev_priv) >= 4 || 3049 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 3050 } 3051 3052 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) 3053 { 3054 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3055 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3056 enum pipe pipe; 3057 u32 tmp; 3058 3059 if (!i9xx_has_pfit(dev_priv)) 3060 return; 3061 3062 tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)); 3063 if (!(tmp & PFIT_ENABLE)) 3064 
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (DISPLAY_VER(dev_priv) >= 4)
		pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp);
	else
		pipe = PIPE_B;	/* pre-gen4 pfit is hardwired to pipe B */

	if (pipe != crtc->pipe)
		return;

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS(dev_priv));
}

/* Determine the output color format from the PIPE_MISC readout. */
static enum intel_output_format
bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));

	if (tmp & PIPE_MISC_YUV420_ENABLE) {
		/*
		 * We support 4:2:0 in full blend mode only.
		 * For xe3_lpd+ this is implied in YUV420 Enable bit.
		 * Ensure the same for prior platforms in YUV420 Mode bit.
		 */
		if (DISPLAY_VER(dev_priv) < 30)
			drm_WARN_ON(&dev_priv->drm,
				    (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);

		return INTEL_OUTPUT_FORMAT_YCBCR420;
	} else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	} else {
		return INTEL_OUTPUT_FORMAT_RGB;
	}
}

/*
 * Read out the full hw state of a gmch-style pipe into @pipe_config.
 * Returns false if the pipe is disabled or its power well is off.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(display, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->sink_format = pipe_config->output_format;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv,
			    TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
	if (!(tmp & TRANSCONF_ENABLE))
		goto out;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & TRANSCONF_BPC_MASK) {
		case TRANSCONF_BPC_6:
			pipe_config->pipe_bpp = 18;
			break;
		case TRANSCONF_BPC_8:
			pipe_config->pipe_bpp = 24;
			break;
		case TRANSCONF_BPC_10:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			MISSING_CASE(tmp);
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & TRANSCONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp);

	pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & TRANSCONF_WGC_ENABLE))
		pipe_config->wgc_enable = true;

	intel_color_get_config(pipe_config);

	if (HAS_DOUBLE_WIDE(dev_priv))
		pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state);

	if (DISPLAY_VER(dev_priv) >= 4) {
		tmp = pipe_config->dpll_hw_state.i9xx.dpll_md;
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = pipe_config->dpll_hw_state.i9xx.dpll;
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(pipe_config);
	else
		i9xx_crtc_clock_get(pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(display, power_domain, wakeref);

	return ret;
}

/* Program TRANSCONF for ILK-style (PCH) platforms. */
void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (!intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	switch (crtc_state->pipe_bpp) {
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither.
		 */
		MISSING_CASE(crtc_state->pipe_bpp);
		fallthrough;
	case 18:
		val |= TRANSCONF_BPC_6;
		break;
	case 24:
		val |= TRANSCONF_BPC_8;
		break;
	case 30:
		val |= TRANSCONF_BPC_10;
		break;
	case 36:
		val |= TRANSCONF_BPC_12;
		break;
	}

	if (crtc_state->dither)
		val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= TRANSCONF_INTERLACE_IF_ID_ILK;
	else
		val |= TRANSCONF_INTERLACE_PF_PD_ILK;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= TRANSCONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709;

	val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
	val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);

	intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
	intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
}

/* Program TRANSCONF for HSW+ platforms. */
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (!intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	if (IS_HASWELL(dev_priv) && crtc_state->dither)
		val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= TRANSCONF_INTERLACE_IF_ID_ILK;
	else
		val |= TRANSCONF_INTERLACE_PF_PD_ILK;

	if (IS_HASWELL(dev_priv) &&
	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;

	intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
	intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
}

/* Program PIPE_MISC (bpc, dithering, output color space, etc). */
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_display *display = to_intel_display(crtc->base.dev);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPE_MISC_BPC_6;
		break;
	case 24:
		val |= PIPE_MISC_BPC_8;
		break;
	case 30:
		val |= PIPE_MISC_BPC_10;
		break;
	case 36:
		/* Port output 12BPC defined for ADLP+ */
		if (DISPLAY_VER(dev_priv) >= 13)
			val |= PIPE_MISC_BPC_12_ADLP;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;

	/* full blend mode is implied by the enable bit on xe3_lpd+ */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= DISPLAY_VER(display) >= 30 ? PIPE_MISC_YUV420_ENABLE :
			PIPE_MISC_YUV420_ENABLE | PIPE_MISC_YUV420_MODE_FULL_BLEND;

	if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
		val |= PIPE_MISC_HDR_MODE_PRECISION;

	if (DISPLAY_VER(dev_priv) >= 12)
		val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;

	/* allow PSR with sprite enabled */
	if (IS_BROADWELL(dev_priv))
		val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE;

	intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val);
}

/* Read pipe_bpp back from PIPE_MISC; returns 0 on an unknown encoding. */
int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));

	switch (tmp & PIPE_MISC_BPC_MASK) {
	case PIPE_MISC_BPC_6:
		return 18;
	case PIPE_MISC_BPC_8:
		return 24;
	case PIPE_MISC_BPC_10:
		return 30;
	/*
	 * PORT OUTPUT 12 BPC defined for ADLP+.
	 *
	 * TODO:
	 * For previous platforms with DSI interface, bits 5:7
	 * are used for storing pipe_bpp irrespective of dithering.
	 * Since the value of 12 BPC is not defined for these bits
	 * on older platforms, need to find a workaround for 12 BPC
	 * MIPI DSI HW readout.
	 */
	case PIPE_MISC_BPC_12_ADLP:
		if (DISPLAY_VER(dev_priv) >= 13)
			return 36;
		fallthrough;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}

/* Minimum number of link lanes needed for the given dotclock, link bw and bpp. */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

/*
 * Read back a data/link M/N pair from the given registers.
 * The TU size is stored in the high bits of the data M register.
 */
void intel_get_m_n(struct drm_i915_private *i915,
		   struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
	m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
	m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
	m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
	m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
}

/* Read back the M1/N1 values for the given transcoder (or g4x pipe). */
void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5)
		intel_get_m_n(dev_priv, m_n,
			      PIPE_DATA_M1(dev_priv, transcoder),
			      PIPE_DATA_N1(dev_priv, transcoder),
			      PIPE_LINK_M1(dev_priv, transcoder),
			      PIPE_LINK_N1(dev_priv, transcoder));
	else
		intel_get_m_n(dev_priv, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}

/* Read back the M2/N2 values, if this transcoder has them. */
void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
		return;

	intel_get_m_n(dev_priv, m_n,
		      PIPE_DATA_M2(dev_priv, transcoder),
		      PIPE_DATA_N2(dev_priv, transcoder),
		      PIPE_LINK_M2(dev_priv, transcoder),
		      PIPE_LINK_N2(dev_priv, transcoder));
}

/* Read back the PCH panel fitter state for this crtc. */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;
	enum pipe pipe;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl);
	else
		pipe = crtc->pipe;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	drm_rect_init(&crtc_state->pch_pfit.dst,
		      REG_FIELD_GET(PF_WIN_XPOS_MASK, pos),
		      REG_FIELD_GET(PF_WIN_YPOS_MASK, pos),
		      REG_FIELD_GET(PF_WIN_XSIZE_MASK, size),
		      REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe);
}

/*
 * Read out the full hw state of an ILK-style pipe into @pipe_config.
 * Returns false if the pipe is disabled or its power well is off.
 */
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(display, power_domain);
	if (!wakeref)
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv,
			    TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
	if (!(tmp & TRANSCONF_ENABLE))
		goto out;

	switch (tmp & TRANSCONF_BPC_MASK) {
	case TRANSCONF_BPC_6:
		pipe_config->pipe_bpp = 18;
		break;
	case TRANSCONF_BPC_8:
		pipe_config->pipe_bpp = 24;
		break;
	case TRANSCONF_BPC_10:
		pipe_config->pipe_bpp = 30;
		break;
	case TRANSCONF_BPC_12:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
	case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
	case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->sink_format = pipe_config->output_format;

	pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);

	pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;

	pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);

	intel_color_get_config(pipe_config);

	pipe_config->pixel_multiplier = 1;

	ilk_pch_get_config(pipe_config);

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(display, power_domain, wakeref);

	return ret;
}

/* Mask of pipes that can participate in pipe joining on this platform. */
static u8 joiner_pipes(struct drm_i915_private *i915)
{
	u8 pipes;

	if (DISPLAY_VER(i915) >= 12)
		pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
	else if (DISPLAY_VER(i915) >= 11)
		pipes = BIT(PIPE_B) | BIT(PIPE_C);
	else
		pipes = 0;

	return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask;
}

/* Is the DDI function of the given transcoder enabled? */
static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
					   enum transcoder cpu_transcoder)
{
	struct intel_display *display = &dev_priv->display;
	enum
intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp = 0;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);

	/* tmp stays 0 (i.e. "disabled") if the power domain is off */
	with_intel_display_power_if_enabled(display, power_domain, wakeref)
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));

	return tmp & TRANS_DDI_FUNC_ENABLE;
}

/* Collect the primary/secondary pipe masks for uncompressed joiner usage. */
static void enabled_uncompressed_joiner_pipes(struct intel_display *display,
					      u8 *primary_pipes, u8 *secondary_pipes)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_crtc *crtc;

	*primary_pipes = 0;
	*secondary_pipes = 0;

	if (!HAS_UNCOMPRESSED_JOINER(display))
		return;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
					 joiner_pipes(i915)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		with_intel_display_power_if_enabled(display, power_domain, wakeref) {
			u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));

			if (tmp & UNCOMPRESSED_JOINER_PRIMARY)
				*primary_pipes |= BIT(pipe);
			if (tmp & UNCOMPRESSED_JOINER_SECONDARY)
				*secondary_pipes |= BIT(pipe);
		}
	}
}

/* Collect the primary/secondary pipe masks for bigjoiner usage. */
static void enabled_bigjoiner_pipes(struct intel_display *display,
				    u8 *primary_pipes, u8 *secondary_pipes)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_crtc *crtc;

	*primary_pipes = 0;
	*secondary_pipes = 0;

	if (!HAS_BIGJOINER(display))
		return;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
					 joiner_pipes(i915)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
		with_intel_display_power_if_enabled(display, power_domain, wakeref) {
			u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));

			if (!(tmp & BIG_JOINER_ENABLE))
				continue;

			if (tmp & PRIMARY_BIG_JOINER_ENABLE)
				*primary_pipes |= BIT(pipe);
			else
				*secondary_pipes |= BIT(pipe);
		}
	}
}

/*
 * Mask of the secondary pipes that should accompany each primary
 * in @primary_pipes, assuming num_pipes consecutive pipes per joiner set.
 */
static u8 expected_secondary_pipes(u8 primary_pipes, int num_pipes)
{
	u8 secondary_pipes = 0;

	for (int i = 1; i < num_pipes; i++)
		secondary_pipes |= primary_pipes << i;

	return secondary_pipes;
}

static u8 expected_uncompressed_joiner_secondary_pipes(u8 uncompjoiner_primary_pipes)
{
	return expected_secondary_pipes(uncompjoiner_primary_pipes, 2);
}

static u8 expected_bigjoiner_secondary_pipes(u8 bigjoiner_primary_pipes)
{
	return expected_secondary_pipes(bigjoiner_primary_pipes, 2);
}

/* Highest-numbered primary pipe at or below @pipe, as a single-bit mask. */
static u8 get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes)
{
	primary_pipes &= GENMASK(pipe, 0);

	return primary_pipes ? BIT(fls(primary_pipes) - 1) : 0;
}

static u8 expected_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes)
{
	return expected_secondary_pipes(ultrajoiner_primary_pipes, 4);
}

/* Add back the last (4th) pipe which lacks the enable bit in hardware. */
static u8 fixup_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes,
					    u8 ultrajoiner_secondary_pipes)
{
	return ultrajoiner_secondary_pipes | ultrajoiner_primary_pipes << 3;
}

/* Collect the primary/secondary pipe masks for ultrajoiner usage. */
static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
				      u8 *primary_pipes, u8 *secondary_pipes)
{
	struct intel_display *display = &i915->display;
	struct intel_crtc *crtc;

	*primary_pipes = 0;
	*secondary_pipes = 0;

	if (!HAS_ULTRAJOINER(display))
		return;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
					 joiner_pipes(i915)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
		with_intel_display_power_if_enabled(display, power_domain, wakeref) {
			u32 tmp = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));

			if (!(tmp & ULTRA_JOINER_ENABLE))
				continue;

			if (tmp & PRIMARY_ULTRA_JOINER_ENABLE)
				*primary_pipes |= BIT(pipe);
			else
				*secondary_pipes |= BIT(pipe);
		}
	}
}

/*
 * Determine the joiner primary pipe and the mask of its secondary pipes
 * for @pipe, considering ultrajoiner, uncompressed joiner and bigjoiner,
 * and sanity-check the readout against the expected pipe topology.
 */
static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
				 enum pipe pipe,
				 u8 *primary_pipe, u8 *secondary_pipes)
{
	struct intel_display *display = to_intel_display(&dev_priv->drm);
	u8 primary_ultrajoiner_pipes;
	u8 primary_uncompressed_joiner_pipes, primary_bigjoiner_pipes;
	u8 secondary_ultrajoiner_pipes;
	u8 secondary_uncompressed_joiner_pipes, secondary_bigjoiner_pipes;
	u8 ultrajoiner_pipes;
	u8 uncompressed_joiner_pipes, bigjoiner_pipes;

	enabled_ultrajoiner_pipes(dev_priv, &primary_ultrajoiner_pipes,
				  &secondary_ultrajoiner_pipes);
	/*
	 * For some strange reason the last pipe in the set of four
	 * shouldn't have ultrajoiner enable bit set in hardware.
	 * Set the bit anyway to make life easier.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    expected_secondary_pipes(primary_ultrajoiner_pipes, 3) !=
		    secondary_ultrajoiner_pipes);
	secondary_ultrajoiner_pipes =
		fixup_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes,
						  secondary_ultrajoiner_pipes);

	/* a pipe must not be both a primary and a secondary */
	drm_WARN_ON(&dev_priv->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0);

	enabled_uncompressed_joiner_pipes(display, &primary_uncompressed_joiner_pipes,
					  &secondary_uncompressed_joiner_pipes);

	drm_WARN_ON(display->drm,
		    (primary_uncompressed_joiner_pipes & secondary_uncompressed_joiner_pipes) != 0);

	enabled_bigjoiner_pipes(display, &primary_bigjoiner_pipes,
				&secondary_bigjoiner_pipes);

	drm_WARN_ON(display->drm,
		    (primary_bigjoiner_pipes & secondary_bigjoiner_pipes) != 0);

	ultrajoiner_pipes = primary_ultrajoiner_pipes | secondary_ultrajoiner_pipes;
	uncompressed_joiner_pipes = primary_uncompressed_joiner_pipes |
				    secondary_uncompressed_joiner_pipes;
	bigjoiner_pipes = primary_bigjoiner_pipes | secondary_bigjoiner_pipes;

	drm_WARN(display->drm, (ultrajoiner_pipes & bigjoiner_pipes) != ultrajoiner_pipes,
		 "Ultrajoiner pipes(%#x) should be bigjoiner pipes(%#x)\n",
		 ultrajoiner_pipes, bigjoiner_pipes);

	drm_WARN(display->drm, secondary_ultrajoiner_pipes !=
		 expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes),
		 "Wrong secondary ultrajoiner pipes(expected %#x, current %#x)\n",
		 expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes),
		 secondary_ultrajoiner_pipes);

	drm_WARN(display->drm, (uncompressed_joiner_pipes & bigjoiner_pipes) != 0,
		 "Uncompressed joiner pipes(%#x) and bigjoiner pipes(%#x) can't intersect\n",
		 uncompressed_joiner_pipes, bigjoiner_pipes);

	drm_WARN(display->drm, secondary_bigjoiner_pipes !=
		 expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes),
		 "Wrong secondary bigjoiner pipes(expected %#x, current %#x)\n",
		 expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes),
		 secondary_bigjoiner_pipes);

	drm_WARN(display->drm, secondary_uncompressed_joiner_pipes !=
		 expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes),
		 "Wrong secondary uncompressed joiner pipes(expected %#x, current %#x)\n",
		 expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes),
		 secondary_uncompressed_joiner_pipes);

	*primary_pipe = 0;
	*secondary_pipes = 0;

	/* ultrajoiner takes precedence, then uncompressed joiner, then bigjoiner */
	if (ultrajoiner_pipes & BIT(pipe)) {
		*primary_pipe = get_joiner_primary_pipe(pipe, primary_ultrajoiner_pipes);
		*secondary_pipes = secondary_ultrajoiner_pipes &
				   expected_ultrajoiner_secondary_pipes(*primary_pipe);

		drm_WARN(display->drm,
			 expected_ultrajoiner_secondary_pipes(*primary_pipe) !=
			 *secondary_pipes,
			 "Wrong ultrajoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n",
			 *primary_pipe,
			 expected_ultrajoiner_secondary_pipes(*primary_pipe),
			 *secondary_pipes);
		return;
	}

	if (uncompressed_joiner_pipes & BIT(pipe)) {
		*primary_pipe = get_joiner_primary_pipe(pipe, primary_uncompressed_joiner_pipes);
		*secondary_pipes = secondary_uncompressed_joiner_pipes &
				   expected_uncompressed_joiner_secondary_pipes(*primary_pipe);

		drm_WARN(display->drm,
			 expected_uncompressed_joiner_secondary_pipes(*primary_pipe) !=
			 *secondary_pipes,
			 "Wrong uncompressed joiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n",
			 *primary_pipe,
			 expected_uncompressed_joiner_secondary_pipes(*primary_pipe),
			 *secondary_pipes);
		return;
	}

	if (bigjoiner_pipes & BIT(pipe)) {
		*primary_pipe = get_joiner_primary_pipe(pipe, primary_bigjoiner_pipes);
		*secondary_pipes = secondary_bigjoiner_pipes &
				   expected_bigjoiner_secondary_pipes(*primary_pipe);

		drm_WARN(display->drm,
			 expected_bigjoiner_secondary_pipes(*primary_pipe) !=
			 *secondary_pipes,
			 "Wrong bigjoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n",
			 *primary_pipe,
			 expected_bigjoiner_secondary_pipes(*primary_pipe),
			 *secondary_pipes);
		return;
	}
}

/* Mask of transcoders that can drive a panel (eDP; plus DSI on ver 11+). */
static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
{
	u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);

	if (DISPLAY_VER(i915) >= 11)
		panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	return panel_transcoder_mask;
}

/* Mask of all transcoders currently feeding this crtc's pipe. */
static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
	enum transcoder cpu_transcoder;
	u8 primary_pipe, secondary_pipes;
	u8 enabled_transcoders = 0;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power.
3879 */ 3880 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, 3881 panel_transcoder_mask) { 3882 enum intel_display_power_domain power_domain; 3883 intel_wakeref_t wakeref; 3884 enum pipe trans_pipe; 3885 u32 tmp = 0; 3886 3887 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3888 with_intel_display_power_if_enabled(display, power_domain, wakeref) 3889 tmp = intel_de_read(dev_priv, 3890 TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)); 3891 3892 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 3893 continue; 3894 3895 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 3896 default: 3897 drm_WARN(dev, 1, 3898 "unknown pipe linked to transcoder %s\n", 3899 transcoder_name(cpu_transcoder)); 3900 fallthrough; 3901 case TRANS_DDI_EDP_INPUT_A_ONOFF: 3902 case TRANS_DDI_EDP_INPUT_A_ON: 3903 trans_pipe = PIPE_A; 3904 break; 3905 case TRANS_DDI_EDP_INPUT_B_ONOFF: 3906 trans_pipe = PIPE_B; 3907 break; 3908 case TRANS_DDI_EDP_INPUT_C_ONOFF: 3909 trans_pipe = PIPE_C; 3910 break; 3911 case TRANS_DDI_EDP_INPUT_D_ONOFF: 3912 trans_pipe = PIPE_D; 3913 break; 3914 } 3915 3916 if (trans_pipe == crtc->pipe) 3917 enabled_transcoders |= BIT(cpu_transcoder); 3918 } 3919 3920 /* single pipe or joiner primary */ 3921 cpu_transcoder = (enum transcoder) crtc->pipe; 3922 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 3923 enabled_transcoders |= BIT(cpu_transcoder); 3924 3925 /* joiner secondary -> consider the primary pipe's transcoder as well */ 3926 enabled_joiner_pipes(dev_priv, crtc->pipe, &primary_pipe, &secondary_pipes); 3927 if (secondary_pipes & BIT(crtc->pipe)) { 3928 cpu_transcoder = (enum transcoder)ffs(primary_pipe) - 1; 3929 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 3930 enabled_transcoders |= BIT(cpu_transcoder); 3931 } 3932 3933 return enabled_transcoders; 3934 } 3935 3936 static bool has_edp_transcoders(u8 enabled_transcoders) 3937 { 3938 return enabled_transcoders & BIT(TRANSCODER_EDP); 3939 } 3940 3941 static bool has_dsi_transcoders(u8 
enabled_transcoders) 3942 { 3943 return enabled_transcoders & (BIT(TRANSCODER_DSI_0) | 3944 BIT(TRANSCODER_DSI_1)); 3945 } 3946 3947 static bool has_pipe_transcoders(u8 enabled_transcoders) 3948 { 3949 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) | 3950 BIT(TRANSCODER_DSI_0) | 3951 BIT(TRANSCODER_DSI_1)); 3952 } 3953 3954 static void assert_enabled_transcoders(struct drm_i915_private *i915, 3955 u8 enabled_transcoders) 3956 { 3957 /* Only one type of transcoder please */ 3958 drm_WARN_ON(&i915->drm, 3959 has_edp_transcoders(enabled_transcoders) + 3960 has_dsi_transcoders(enabled_transcoders) + 3961 has_pipe_transcoders(enabled_transcoders) > 1); 3962 3963 /* Only DSI transcoders can be ganged */ 3964 drm_WARN_ON(&i915->drm, 3965 !has_dsi_transcoders(enabled_transcoders) && 3966 !is_power_of_2(enabled_transcoders)); 3967 } 3968 3969 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 3970 struct intel_crtc_state *pipe_config, 3971 struct intel_display_power_domain_set *power_domain_set) 3972 { 3973 struct intel_display *display = to_intel_display(crtc); 3974 struct drm_device *dev = crtc->base.dev; 3975 struct drm_i915_private *dev_priv = to_i915(dev); 3976 unsigned long enabled_transcoders; 3977 u32 tmp; 3978 3979 enabled_transcoders = hsw_enabled_transcoders(crtc); 3980 if (!enabled_transcoders) 3981 return false; 3982 3983 assert_enabled_transcoders(dev_priv, enabled_transcoders); 3984 3985 /* 3986 * With the exception of DSI we should only ever have 3987 * a single enabled transcoder. With DSI let's just 3988 * pick the first one. 
3989 */ 3990 pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1; 3991 3992 if (!intel_display_power_get_in_set_if_enabled(display, power_domain_set, 3993 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 3994 return false; 3995 3996 if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) { 3997 tmp = intel_de_read(dev_priv, 3998 TRANS_DDI_FUNC_CTL(dev_priv, pipe_config->cpu_transcoder)); 3999 4000 if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF) 4001 pipe_config->pch_pfit.force_thru = true; 4002 } 4003 4004 tmp = intel_de_read(dev_priv, 4005 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 4006 4007 return tmp & TRANSCONF_ENABLE; 4008 } 4009 4010 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 4011 struct intel_crtc_state *pipe_config, 4012 struct intel_display_power_domain_set *power_domain_set) 4013 { 4014 struct intel_display *display = to_intel_display(crtc); 4015 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4016 enum transcoder cpu_transcoder; 4017 enum port port; 4018 u32 tmp; 4019 4020 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 4021 if (port == PORT_A) 4022 cpu_transcoder = TRANSCODER_DSI_A; 4023 else 4024 cpu_transcoder = TRANSCODER_DSI_C; 4025 4026 if (!intel_display_power_get_in_set_if_enabled(display, power_domain_set, 4027 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) 4028 continue; 4029 4030 /* 4031 * The PLL needs to be enabled with a valid divider 4032 * configuration, otherwise accessing DSI registers will hang 4033 * the machine. See BSpec North Display Engine 4034 * registers/MIPI[BXT]. We can break out here early, since we 4035 * need the same DSI PLL to be enabled for both DSI ports. 
4036 */ 4037 if (!bxt_dsi_pll_is_enabled(dev_priv)) 4038 break; 4039 4040 /* XXX: this works for video mode only */ 4041 tmp = intel_de_read(display, BXT_MIPI_PORT_CTRL(port)); 4042 if (!(tmp & DPI_ENABLE)) 4043 continue; 4044 4045 tmp = intel_de_read(display, MIPI_CTRL(display, port)); 4046 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 4047 continue; 4048 4049 pipe_config->cpu_transcoder = cpu_transcoder; 4050 break; 4051 } 4052 4053 return transcoder_is_dsi(pipe_config->cpu_transcoder); 4054 } 4055 4056 static void intel_joiner_get_config(struct intel_crtc_state *crtc_state) 4057 { 4058 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4059 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4060 u8 primary_pipe, secondary_pipes; 4061 enum pipe pipe = crtc->pipe; 4062 4063 enabled_joiner_pipes(i915, pipe, &primary_pipe, &secondary_pipes); 4064 4065 if (((primary_pipe | secondary_pipes) & BIT(pipe)) == 0) 4066 return; 4067 4068 crtc_state->joiner_pipes = primary_pipe | secondary_pipes; 4069 } 4070 4071 static bool hsw_get_pipe_config(struct intel_crtc *crtc, 4072 struct intel_crtc_state *pipe_config) 4073 { 4074 struct intel_display *display = to_intel_display(crtc); 4075 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4076 bool active; 4077 u32 tmp; 4078 4079 if (!intel_display_power_get_in_set_if_enabled(display, &crtc->hw_readout_power_domains, 4080 POWER_DOMAIN_PIPE(crtc->pipe))) 4081 return false; 4082 4083 pipe_config->shared_dpll = NULL; 4084 4085 active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains); 4086 4087 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 4088 bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) { 4089 drm_WARN_ON(&dev_priv->drm, active); 4090 active = true; 4091 } 4092 4093 if (!active) 4094 goto out; 4095 4096 intel_joiner_get_config(pipe_config); 4097 intel_dsc_get_config(pipe_config); 4098 4099 if 
(!transcoder_is_dsi(pipe_config->cpu_transcoder) || 4100 DISPLAY_VER(dev_priv) >= 11) 4101 intel_get_transcoder_timings(crtc, pipe_config); 4102 4103 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) 4104 intel_vrr_get_config(pipe_config); 4105 4106 intel_get_pipe_src_size(crtc, pipe_config); 4107 4108 if (IS_HASWELL(dev_priv)) { 4109 u32 tmp = intel_de_read(dev_priv, 4110 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 4111 4112 if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW) 4113 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 4114 else 4115 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 4116 } else { 4117 pipe_config->output_format = 4118 bdw_get_pipe_misc_output_format(crtc); 4119 } 4120 4121 pipe_config->sink_format = pipe_config->output_format; 4122 4123 intel_color_get_config(pipe_config); 4124 4125 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe)); 4126 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp); 4127 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 4128 pipe_config->ips_linetime = 4129 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); 4130 4131 if (intel_display_power_get_in_set_if_enabled(display, &crtc->hw_readout_power_domains, 4132 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) { 4133 if (DISPLAY_VER(dev_priv) >= 9) 4134 skl_scaler_get_config(pipe_config); 4135 else 4136 ilk_get_pfit_config(pipe_config); 4137 } 4138 4139 hsw_ips_get_config(pipe_config); 4140 4141 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 4142 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 4143 pipe_config->pixel_multiplier = 4144 intel_de_read(dev_priv, 4145 TRANS_MULT(dev_priv, pipe_config->cpu_transcoder)) + 1; 4146 } else { 4147 pipe_config->pixel_multiplier = 1; 4148 } 4149 4150 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { 4151 tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder)); 4152 4153 pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) 
+ 1; 4154 } else { 4155 /* no idea if this is correct */ 4156 pipe_config->framestart_delay = 1; 4157 } 4158 4159 out: 4160 intel_display_power_put_all_in_set(display, &crtc->hw_readout_power_domains); 4161 4162 return active; 4163 } 4164 4165 bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) 4166 { 4167 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4168 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4169 4170 if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state)) 4171 return false; 4172 4173 crtc_state->hw.active = true; 4174 4175 intel_crtc_readout_derived_state(crtc_state); 4176 4177 return true; 4178 } 4179 4180 int intel_dotclock_calculate(int link_freq, 4181 const struct intel_link_m_n *m_n) 4182 { 4183 /* 4184 * The calculation for the data clock -> pixel clock is: 4185 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 4186 * But we want to avoid losing precision if possible, so: 4187 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 4188 * 4189 * and for link freq (10kbs units) -> pixel clock it is: 4190 * link_symbol_clock = link_freq * 10 / link_symbol_size 4191 * pixel_clock = (m * link_symbol_clock) / n 4192 * or for more precision: 4193 * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size) 4194 */ 4195 4196 if (!m_n->link_n) 4197 return 0; 4198 4199 return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10), 4200 m_n->link_n * intel_dp_link_symbol_size(link_freq)); 4201 } 4202 4203 int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) 4204 { 4205 int dotclock; 4206 4207 if (intel_crtc_has_dp_encoder(pipe_config)) 4208 dotclock = intel_dotclock_calculate(pipe_config->port_clock, 4209 &pipe_config->dp_m_n); 4210 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) 4211 dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24, 4212 pipe_config->pipe_bpp); 4213 else 4214 dotclock = pipe_config->port_clock; 4215 4216 if (pipe_config->output_format 
== INTEL_OUTPUT_FORMAT_YCBCR420 && 4217 !intel_crtc_has_dp_encoder(pipe_config)) 4218 dotclock *= 2; 4219 4220 if (pipe_config->pixel_multiplier) 4221 dotclock /= pipe_config->pixel_multiplier; 4222 4223 return dotclock; 4224 } 4225 4226 /* Returns the currently programmed mode of the given encoder. */ 4227 struct drm_display_mode * 4228 intel_encoder_current_mode(struct intel_encoder *encoder) 4229 { 4230 struct intel_display *display = to_intel_display(encoder); 4231 struct intel_crtc_state *crtc_state; 4232 struct drm_display_mode *mode; 4233 struct intel_crtc *crtc; 4234 enum pipe pipe; 4235 4236 if (!encoder->get_hw_state(encoder, &pipe)) 4237 return NULL; 4238 4239 crtc = intel_crtc_for_pipe(display, pipe); 4240 4241 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 4242 if (!mode) 4243 return NULL; 4244 4245 crtc_state = intel_crtc_state_alloc(crtc); 4246 if (!crtc_state) { 4247 kfree(mode); 4248 return NULL; 4249 } 4250 4251 if (!intel_crtc_get_pipe_config(crtc_state)) { 4252 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); 4253 kfree(mode); 4254 return NULL; 4255 } 4256 4257 intel_encoder_get_config(encoder, crtc_state); 4258 4259 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode); 4260 4261 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); 4262 4263 return mode; 4264 } 4265 4266 static bool encoders_cloneable(const struct intel_encoder *a, 4267 const struct intel_encoder *b) 4268 { 4269 /* masks could be asymmetric, so check both ways */ 4270 return a == b || (a->cloneable & BIT(b->type) && 4271 b->cloneable & BIT(a->type)); 4272 } 4273 4274 static bool check_single_encoder_cloning(struct intel_atomic_state *state, 4275 struct intel_crtc *crtc, 4276 struct intel_encoder *encoder) 4277 { 4278 struct intel_encoder *source_encoder; 4279 struct drm_connector *connector; 4280 struct drm_connector_state *connector_state; 4281 int i; 4282 4283 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4284 if 
(connector_state->crtc != &crtc->base) 4285 continue; 4286 4287 source_encoder = 4288 to_intel_encoder(connector_state->best_encoder); 4289 if (!encoders_cloneable(encoder, source_encoder)) 4290 return false; 4291 } 4292 4293 return true; 4294 } 4295 4296 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) 4297 { 4298 const struct drm_display_mode *pipe_mode = 4299 &crtc_state->hw.pipe_mode; 4300 int linetime_wm; 4301 4302 if (!crtc_state->hw.enable) 4303 return 0; 4304 4305 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4306 pipe_mode->crtc_clock); 4307 4308 return min(linetime_wm, 0x1ff); 4309 } 4310 4311 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, 4312 const struct intel_cdclk_state *cdclk_state) 4313 { 4314 const struct drm_display_mode *pipe_mode = 4315 &crtc_state->hw.pipe_mode; 4316 int linetime_wm; 4317 4318 if (!crtc_state->hw.enable) 4319 return 0; 4320 4321 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4322 cdclk_state->logical.cdclk); 4323 4324 return min(linetime_wm, 0x1ff); 4325 } 4326 4327 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) 4328 { 4329 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4330 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4331 const struct drm_display_mode *pipe_mode = 4332 &crtc_state->hw.pipe_mode; 4333 int linetime_wm; 4334 4335 if (!crtc_state->hw.enable) 4336 return 0; 4337 4338 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8, 4339 crtc_state->pixel_rate); 4340 4341 /* Display WA #1135: BXT:ALL GLK:ALL */ 4342 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 4343 skl_watermark_ipc_enabled(dev_priv)) 4344 linetime_wm /= 2; 4345 4346 return min(linetime_wm, 0x1ff); 4347 } 4348 4349 static int hsw_compute_linetime_wm(struct intel_atomic_state *state, 4350 struct intel_crtc *crtc) 4351 { 4352 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4353 struct 
intel_crtc_state *crtc_state = 4354 intel_atomic_get_new_crtc_state(state, crtc); 4355 const struct intel_cdclk_state *cdclk_state; 4356 4357 if (DISPLAY_VER(dev_priv) >= 9) 4358 crtc_state->linetime = skl_linetime_wm(crtc_state); 4359 else 4360 crtc_state->linetime = hsw_linetime_wm(crtc_state); 4361 4362 if (!hsw_crtc_supports_ips(crtc)) 4363 return 0; 4364 4365 cdclk_state = intel_atomic_get_cdclk_state(state); 4366 if (IS_ERR(cdclk_state)) 4367 return PTR_ERR(cdclk_state); 4368 4369 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state, 4370 cdclk_state); 4371 4372 return 0; 4373 } 4374 4375 static int intel_crtc_atomic_check(struct intel_atomic_state *state, 4376 struct intel_crtc *crtc) 4377 { 4378 struct intel_display *display = to_intel_display(crtc); 4379 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4380 struct intel_crtc_state *crtc_state = 4381 intel_atomic_get_new_crtc_state(state, crtc); 4382 int ret; 4383 4384 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) && 4385 intel_crtc_needs_modeset(crtc_state) && 4386 !crtc_state->hw.active) 4387 crtc_state->update_wm_post = true; 4388 4389 if (intel_crtc_needs_modeset(crtc_state)) { 4390 ret = intel_dpll_crtc_get_shared_dpll(state, crtc); 4391 if (ret) 4392 return ret; 4393 } 4394 4395 ret = intel_color_check(state, crtc); 4396 if (ret) 4397 return ret; 4398 4399 ret = intel_wm_compute(state, crtc); 4400 if (ret) { 4401 drm_dbg_kms(&dev_priv->drm, 4402 "[CRTC:%d:%s] watermarks are invalid\n", 4403 crtc->base.base.id, crtc->base.name); 4404 return ret; 4405 } 4406 4407 if (DISPLAY_VER(dev_priv) >= 9) { 4408 if (intel_crtc_needs_modeset(crtc_state) || 4409 intel_crtc_needs_fastset(crtc_state)) { 4410 ret = skl_update_scaler_crtc(crtc_state); 4411 if (ret) 4412 return ret; 4413 } 4414 4415 ret = intel_atomic_setup_scalers(state, crtc); 4416 if (ret) 4417 return ret; 4418 } 4419 4420 if (HAS_IPS(display)) { 4421 ret = hsw_ips_compute_config(state, crtc); 4422 if (ret) 4423 return ret; 4424 
} 4425 4426 if (DISPLAY_VER(dev_priv) >= 9 || 4427 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 4428 ret = hsw_compute_linetime_wm(state, crtc); 4429 if (ret) 4430 return ret; 4431 4432 } 4433 4434 ret = intel_psr2_sel_fetch_update(state, crtc); 4435 if (ret) 4436 return ret; 4437 4438 return 0; 4439 } 4440 4441 static int 4442 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 4443 struct intel_crtc_state *crtc_state) 4444 { 4445 struct drm_connector *connector = conn_state->connector; 4446 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 4447 const struct drm_display_info *info = &connector->display_info; 4448 int bpp; 4449 4450 switch (conn_state->max_bpc) { 4451 case 6 ... 7: 4452 bpp = 6 * 3; 4453 break; 4454 case 8 ... 9: 4455 bpp = 8 * 3; 4456 break; 4457 case 10 ... 11: 4458 bpp = 10 * 3; 4459 break; 4460 case 12 ... 16: 4461 bpp = 12 * 3; 4462 break; 4463 default: 4464 MISSING_CASE(conn_state->max_bpc); 4465 return -EINVAL; 4466 } 4467 4468 if (bpp < crtc_state->pipe_bpp) { 4469 drm_dbg_kms(&i915->drm, 4470 "[CONNECTOR:%d:%s] Limiting display bpp to %d " 4471 "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n", 4472 connector->base.id, connector->name, 4473 bpp, 3 * info->bpc, 4474 3 * conn_state->max_requested_bpc, 4475 crtc_state->pipe_bpp); 4476 4477 crtc_state->pipe_bpp = bpp; 4478 } 4479 4480 return 0; 4481 } 4482 4483 static int 4484 compute_baseline_pipe_bpp(struct intel_atomic_state *state, 4485 struct intel_crtc *crtc) 4486 { 4487 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4488 struct intel_crtc_state *crtc_state = 4489 intel_atomic_get_new_crtc_state(state, crtc); 4490 struct drm_connector *connector; 4491 struct drm_connector_state *connector_state; 4492 int bpp, i; 4493 4494 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 4495 IS_CHERRYVIEW(dev_priv))) 4496 bpp = 10*3; 4497 else if (DISPLAY_VER(dev_priv) >= 5) 4498 bpp = 12*3; 4499 else 4500 bpp = 8*3; 4501 4502 
crtc_state->pipe_bpp = bpp; 4503 4504 /* Clamp display bpp to connector max bpp */ 4505 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4506 int ret; 4507 4508 if (connector_state->crtc != &crtc->base) 4509 continue; 4510 4511 ret = compute_sink_pipe_bpp(connector_state, crtc_state); 4512 if (ret) 4513 return ret; 4514 } 4515 4516 return 0; 4517 } 4518 4519 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 4520 { 4521 struct drm_device *dev = state->base.dev; 4522 struct drm_connector *connector; 4523 struct drm_connector_list_iter conn_iter; 4524 unsigned int used_ports = 0; 4525 unsigned int used_mst_ports = 0; 4526 bool ret = true; 4527 4528 /* 4529 * We're going to peek into connector->state, 4530 * hence connection_mutex must be held. 4531 */ 4532 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); 4533 4534 /* 4535 * Walk the connector list instead of the encoder 4536 * list to detect the problem on ddi platforms 4537 * where there's just one encoder per digital port. 
4538 */ 4539 drm_connector_list_iter_begin(dev, &conn_iter); 4540 drm_for_each_connector_iter(connector, &conn_iter) { 4541 struct drm_connector_state *connector_state; 4542 struct intel_encoder *encoder; 4543 4544 connector_state = 4545 drm_atomic_get_new_connector_state(&state->base, 4546 connector); 4547 if (!connector_state) 4548 connector_state = connector->state; 4549 4550 if (!connector_state->best_encoder) 4551 continue; 4552 4553 encoder = to_intel_encoder(connector_state->best_encoder); 4554 4555 drm_WARN_ON(dev, !connector_state->crtc); 4556 4557 switch (encoder->type) { 4558 case INTEL_OUTPUT_DDI: 4559 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) 4560 break; 4561 fallthrough; 4562 case INTEL_OUTPUT_DP: 4563 case INTEL_OUTPUT_HDMI: 4564 case INTEL_OUTPUT_EDP: 4565 /* the same port mustn't appear more than once */ 4566 if (used_ports & BIT(encoder->port)) 4567 ret = false; 4568 4569 used_ports |= BIT(encoder->port); 4570 break; 4571 case INTEL_OUTPUT_DP_MST: 4572 used_mst_ports |= 4573 1 << encoder->port; 4574 break; 4575 default: 4576 break; 4577 } 4578 } 4579 drm_connector_list_iter_end(&conn_iter); 4580 4581 /* can't mix MST and SST/HDMI on the same port */ 4582 if (used_ports & used_mst_ports) 4583 return false; 4584 4585 return ret; 4586 } 4587 4588 static void 4589 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, 4590 struct intel_crtc *crtc) 4591 { 4592 struct intel_crtc_state *crtc_state = 4593 intel_atomic_get_new_crtc_state(state, crtc); 4594 4595 WARN_ON(intel_crtc_is_joiner_secondary(crtc_state)); 4596 4597 drm_property_replace_blob(&crtc_state->hw.degamma_lut, 4598 crtc_state->uapi.degamma_lut); 4599 drm_property_replace_blob(&crtc_state->hw.gamma_lut, 4600 crtc_state->uapi.gamma_lut); 4601 drm_property_replace_blob(&crtc_state->hw.ctm, 4602 crtc_state->uapi.ctm); 4603 } 4604 4605 static void 4606 intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state, 4607 struct intel_crtc *crtc) 4608 { 4609 
struct intel_crtc_state *crtc_state = 4610 intel_atomic_get_new_crtc_state(state, crtc); 4611 4612 WARN_ON(intel_crtc_is_joiner_secondary(crtc_state)); 4613 4614 crtc_state->hw.enable = crtc_state->uapi.enable; 4615 crtc_state->hw.active = crtc_state->uapi.active; 4616 drm_mode_copy(&crtc_state->hw.mode, 4617 &crtc_state->uapi.mode); 4618 drm_mode_copy(&crtc_state->hw.adjusted_mode, 4619 &crtc_state->uapi.adjusted_mode); 4620 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter; 4621 4622 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 4623 } 4624 4625 static void 4626 copy_joiner_crtc_state_nomodeset(struct intel_atomic_state *state, 4627 struct intel_crtc *secondary_crtc) 4628 { 4629 struct intel_crtc_state *secondary_crtc_state = 4630 intel_atomic_get_new_crtc_state(state, secondary_crtc); 4631 struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state); 4632 const struct intel_crtc_state *primary_crtc_state = 4633 intel_atomic_get_new_crtc_state(state, primary_crtc); 4634 4635 drm_property_replace_blob(&secondary_crtc_state->hw.degamma_lut, 4636 primary_crtc_state->hw.degamma_lut); 4637 drm_property_replace_blob(&secondary_crtc_state->hw.gamma_lut, 4638 primary_crtc_state->hw.gamma_lut); 4639 drm_property_replace_blob(&secondary_crtc_state->hw.ctm, 4640 primary_crtc_state->hw.ctm); 4641 4642 secondary_crtc_state->uapi.color_mgmt_changed = primary_crtc_state->uapi.color_mgmt_changed; 4643 } 4644 4645 static int 4646 copy_joiner_crtc_state_modeset(struct intel_atomic_state *state, 4647 struct intel_crtc *secondary_crtc) 4648 { 4649 struct intel_crtc_state *secondary_crtc_state = 4650 intel_atomic_get_new_crtc_state(state, secondary_crtc); 4651 struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state); 4652 const struct intel_crtc_state *primary_crtc_state = 4653 intel_atomic_get_new_crtc_state(state, primary_crtc); 4654 struct intel_crtc_state *saved_state; 4655 4656 WARN_ON(primary_crtc_state->joiner_pipes != 
4657 secondary_crtc_state->joiner_pipes); 4658 4659 saved_state = kmemdup(primary_crtc_state, sizeof(*saved_state), GFP_KERNEL); 4660 if (!saved_state) 4661 return -ENOMEM; 4662 4663 /* preserve some things from the slave's original crtc state */ 4664 saved_state->uapi = secondary_crtc_state->uapi; 4665 saved_state->scaler_state = secondary_crtc_state->scaler_state; 4666 saved_state->shared_dpll = secondary_crtc_state->shared_dpll; 4667 saved_state->crc_enabled = secondary_crtc_state->crc_enabled; 4668 4669 intel_crtc_free_hw_state(secondary_crtc_state); 4670 if (secondary_crtc_state->dp_tunnel_ref.tunnel) 4671 drm_dp_tunnel_ref_put(&secondary_crtc_state->dp_tunnel_ref); 4672 memcpy(secondary_crtc_state, saved_state, sizeof(*secondary_crtc_state)); 4673 kfree(saved_state); 4674 4675 /* Re-init hw state */ 4676 memset(&secondary_crtc_state->hw, 0, sizeof(secondary_crtc_state->hw)); 4677 secondary_crtc_state->hw.enable = primary_crtc_state->hw.enable; 4678 secondary_crtc_state->hw.active = primary_crtc_state->hw.active; 4679 drm_mode_copy(&secondary_crtc_state->hw.mode, 4680 &primary_crtc_state->hw.mode); 4681 drm_mode_copy(&secondary_crtc_state->hw.pipe_mode, 4682 &primary_crtc_state->hw.pipe_mode); 4683 drm_mode_copy(&secondary_crtc_state->hw.adjusted_mode, 4684 &primary_crtc_state->hw.adjusted_mode); 4685 secondary_crtc_state->hw.scaling_filter = primary_crtc_state->hw.scaling_filter; 4686 4687 if (primary_crtc_state->dp_tunnel_ref.tunnel) 4688 drm_dp_tunnel_ref_get(primary_crtc_state->dp_tunnel_ref.tunnel, 4689 &secondary_crtc_state->dp_tunnel_ref); 4690 4691 copy_joiner_crtc_state_nomodeset(state, secondary_crtc); 4692 4693 secondary_crtc_state->uapi.mode_changed = primary_crtc_state->uapi.mode_changed; 4694 secondary_crtc_state->uapi.connectors_changed = primary_crtc_state->uapi.connectors_changed; 4695 secondary_crtc_state->uapi.active_changed = primary_crtc_state->uapi.active_changed; 4696 4697 WARN_ON(primary_crtc_state->joiner_pipes != 4698 
secondary_crtc_state->joiner_pipes); 4699 4700 return 0; 4701 } 4702 4703 static int 4704 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, 4705 struct intel_crtc *crtc) 4706 { 4707 struct intel_crtc_state *crtc_state = 4708 intel_atomic_get_new_crtc_state(state, crtc); 4709 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4710 struct intel_crtc_state *saved_state; 4711 4712 saved_state = intel_crtc_state_alloc(crtc); 4713 if (!saved_state) 4714 return -ENOMEM; 4715 4716 /* free the old crtc_state->hw members */ 4717 intel_crtc_free_hw_state(crtc_state); 4718 4719 intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state); 4720 4721 /* FIXME: before the switch to atomic started, a new pipe_config was 4722 * kzalloc'd. Code that depends on any field being zero should be 4723 * fixed, so that the crtc_state can be safely duplicated. For now, 4724 * only fields that are know to not cause problems are preserved. */ 4725 4726 saved_state->uapi = crtc_state->uapi; 4727 saved_state->inherited = crtc_state->inherited; 4728 saved_state->scaler_state = crtc_state->scaler_state; 4729 saved_state->shared_dpll = crtc_state->shared_dpll; 4730 saved_state->dpll_hw_state = crtc_state->dpll_hw_state; 4731 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, 4732 sizeof(saved_state->icl_port_dplls)); 4733 saved_state->crc_enabled = crtc_state->crc_enabled; 4734 if (IS_G4X(dev_priv) || 4735 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4736 saved_state->wm = crtc_state->wm; 4737 4738 memcpy(crtc_state, saved_state, sizeof(*crtc_state)); 4739 kfree(saved_state); 4740 4741 intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc); 4742 4743 return 0; 4744 } 4745 4746 static int 4747 intel_modeset_pipe_config(struct intel_atomic_state *state, 4748 struct intel_crtc *crtc, 4749 const struct intel_link_bw_limits *limits) 4750 { 4751 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4752 struct intel_crtc_state *crtc_state = 4753 
intel_atomic_get_new_crtc_state(state, crtc); 4754 struct drm_connector *connector; 4755 struct drm_connector_state *connector_state; 4756 int pipe_src_w, pipe_src_h; 4757 int base_bpp, ret, i; 4758 4759 crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe; 4760 4761 crtc_state->framestart_delay = 1; 4762 4763 /* 4764 * Sanitize sync polarity flags based on requested ones. If neither 4765 * positive or negative polarity is requested, treat this as meaning 4766 * negative polarity. 4767 */ 4768 if (!(crtc_state->hw.adjusted_mode.flags & 4769 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 4770 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 4771 4772 if (!(crtc_state->hw.adjusted_mode.flags & 4773 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 4774 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 4775 4776 ret = compute_baseline_pipe_bpp(state, crtc); 4777 if (ret) 4778 return ret; 4779 4780 crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe); 4781 crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe]; 4782 4783 if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) { 4784 drm_dbg_kms(&i915->drm, 4785 "[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n", 4786 crtc->base.base.id, crtc->base.name, 4787 FXP_Q4_ARGS(crtc_state->max_link_bpp_x16)); 4788 crtc_state->bw_constrained = true; 4789 } 4790 4791 base_bpp = crtc_state->pipe_bpp; 4792 4793 /* 4794 * Determine the real pipe dimensions. Note that stereo modes can 4795 * increase the actual pipe size due to the frame doubling and 4796 * insertion of additional space for blanks between the frame. This 4797 * is stored in the crtc timings. We use the requested mode to do this 4798 * computation to clearly distinguish it from the adjusted mode, which 4799 * can be changed by the connectors in the below retry loop. 
4800 */ 4801 drm_mode_get_hv_timing(&crtc_state->hw.mode, 4802 &pipe_src_w, &pipe_src_h); 4803 drm_rect_init(&crtc_state->pipe_src, 0, 0, 4804 pipe_src_w, pipe_src_h); 4805 4806 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4807 struct intel_encoder *encoder = 4808 to_intel_encoder(connector_state->best_encoder); 4809 4810 if (connector_state->crtc != &crtc->base) 4811 continue; 4812 4813 if (!check_single_encoder_cloning(state, crtc, encoder)) { 4814 drm_dbg_kms(&i915->drm, 4815 "[ENCODER:%d:%s] rejecting invalid cloning configuration\n", 4816 encoder->base.base.id, encoder->base.name); 4817 return -EINVAL; 4818 } 4819 4820 /* 4821 * Determine output_types before calling the .compute_config() 4822 * hooks so that the hooks can use this information safely. 4823 */ 4824 if (encoder->compute_output_type) 4825 crtc_state->output_types |= 4826 BIT(encoder->compute_output_type(encoder, crtc_state, 4827 connector_state)); 4828 else 4829 crtc_state->output_types |= BIT(encoder->type); 4830 } 4831 4832 /* Ensure the port clock defaults are reset when retrying. */ 4833 crtc_state->port_clock = 0; 4834 crtc_state->pixel_multiplier = 1; 4835 4836 /* Fill in default crtc timings, allow encoders to overwrite them. */ 4837 drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode, 4838 CRTC_STEREO_DOUBLE); 4839 4840 /* Pass our mode to the connectors and the CRTC to give them a chance to 4841 * adjust it according to limitations or connector properties, and also 4842 * a chance to reject the mode entirely. 
4843 */ 4844 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4845 struct intel_encoder *encoder = 4846 to_intel_encoder(connector_state->best_encoder); 4847 4848 if (connector_state->crtc != &crtc->base) 4849 continue; 4850 4851 ret = encoder->compute_config(encoder, crtc_state, 4852 connector_state); 4853 if (ret == -EDEADLK) 4854 return ret; 4855 if (ret < 0) { 4856 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n", 4857 encoder->base.base.id, encoder->base.name, ret); 4858 return ret; 4859 } 4860 } 4861 4862 /* Set default port clock if not overwritten by the encoder. Needs to be 4863 * done afterwards in case the encoder adjusts the mode. */ 4864 if (!crtc_state->port_clock) 4865 crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock 4866 * crtc_state->pixel_multiplier; 4867 4868 ret = intel_crtc_compute_config(state, crtc); 4869 if (ret == -EDEADLK) 4870 return ret; 4871 if (ret < 0) { 4872 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n", 4873 crtc->base.base.id, crtc->base.name, ret); 4874 return ret; 4875 } 4876 4877 /* Dithering seems to not pass-through bits correctly when it should, so 4878 * only enable it on 6bpc panels and when its not a compliance 4879 * test requesting 6bpc video pattern. 
4880 */ 4881 crtc_state->dither = (crtc_state->pipe_bpp == 6*3) && 4882 !crtc_state->dither_force_disable; 4883 drm_dbg_kms(&i915->drm, 4884 "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 4885 crtc->base.base.id, crtc->base.name, 4886 base_bpp, crtc_state->pipe_bpp, crtc_state->dither); 4887 4888 return 0; 4889 } 4890 4891 static int 4892 intel_modeset_pipe_config_late(struct intel_atomic_state *state, 4893 struct intel_crtc *crtc) 4894 { 4895 struct intel_crtc_state *crtc_state = 4896 intel_atomic_get_new_crtc_state(state, crtc); 4897 struct drm_connector_state *conn_state; 4898 struct drm_connector *connector; 4899 int i; 4900 4901 intel_vrr_compute_config_late(crtc_state); 4902 4903 for_each_new_connector_in_state(&state->base, connector, 4904 conn_state, i) { 4905 struct intel_encoder *encoder = 4906 to_intel_encoder(conn_state->best_encoder); 4907 int ret; 4908 4909 if (conn_state->crtc != &crtc->base || 4910 !encoder->compute_config_late) 4911 continue; 4912 4913 ret = encoder->compute_config_late(encoder, crtc_state, 4914 conn_state); 4915 if (ret) 4916 return ret; 4917 } 4918 4919 return 0; 4920 } 4921 4922 bool intel_fuzzy_clock_check(int clock1, int clock2) 4923 { 4924 int diff; 4925 4926 if (clock1 == clock2) 4927 return true; 4928 4929 if (!clock1 || !clock2) 4930 return false; 4931 4932 diff = abs(clock1 - clock2); 4933 4934 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 4935 return true; 4936 4937 return false; 4938 } 4939 4940 static bool 4941 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 4942 const struct intel_link_m_n *m2_n2) 4943 { 4944 return m_n->tu == m2_n2->tu && 4945 m_n->data_m == m2_n2->data_m && 4946 m_n->data_n == m2_n2->data_n && 4947 m_n->link_m == m2_n2->link_m && 4948 m_n->link_n == m2_n2->link_n; 4949 } 4950 4951 static bool 4952 intel_compare_infoframe(const union hdmi_infoframe *a, 4953 const union hdmi_infoframe *b) 4954 { 4955 return memcmp(a, b, sizeof(*a)) == 0; 4956 } 4957 4958 
static bool
intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
			 const struct drm_dp_vsc_sdp *b)
{
	/* compare only the fields the driver cares about, not the whole SDP */
	return a->pixelformat == b->pixelformat &&
		a->colorimetry == b->colorimetry &&
		a->bpc == b->bpc &&
		a->dynamic_range == b->dynamic_range &&
		a->content_type == b->content_type;
}

static bool
intel_compare_dp_as_sdp(const struct drm_dp_as_sdp *a,
			const struct drm_dp_as_sdp *b)
{
	/* adaptive-sync SDP: compare the programmed timing/mode fields */
	return a->vtotal == b->vtotal &&
		a->target_rr == b->target_rr &&
		a->duration_incr_ms == b->duration_incr_ms &&
		a->duration_decr_ms == b->duration_decr_ms &&
		a->mode == b->mode;
}

/* Byte-wise equality of two fixed-length buffers. */
static bool
intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
{
	return memcmp(a, b, len) == 0;
}

/*
 * Print a state-mismatch message for @crtc. With @fastset true the message
 * reads "fastset requirement not met", otherwise plain "mismatch"; @name
 * identifies the mismatching field and @format/... carry the details
 * (printed via the %pV va_format mechanism).
 */
static void __printf(5, 6)
pipe_config_mismatch(struct drm_printer *p, bool fastset,
		     const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_printf(p, "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
			   crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_printf(p, "[CRTC:%d:%s] mismatch in %s %pV\n",
			   crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}

/* Report an infoframe mismatch and dump both expected and found frames. */
static void
pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
			       const struct intel_crtc *crtc,
			       const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	const char *loglevel;

	if (fastset) {
		/* on the fastset path the dump is debug-only; skip if disabled */
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		loglevel = KERN_DEBUG;
	} else {
		loglevel = KERN_ERR;
	}

	pipe_config_mismatch(p, fastset, crtc, name, "infoframe");

	drm_printf(p,
		   "expected:\n");
	hdmi_infoframe_log(loglevel, i915->drm.dev, a);
	drm_printf(p, "found:\n");
	hdmi_infoframe_log(loglevel, i915->drm.dev, b);
}

/* Report a DP VSC SDP mismatch and dump both expected and found SDPs. */
static void
pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
				const struct intel_crtc *crtc,
				const char *name,
				const struct drm_dp_vsc_sdp *a,
				const struct drm_dp_vsc_sdp *b)
{
	pipe_config_mismatch(p, fastset, crtc, name, "dp vsc sdp");

	drm_printf(p, "expected:\n");
	drm_dp_vsc_sdp_log(p, a);
	drm_printf(p, "found:\n");
	drm_dp_vsc_sdp_log(p, b);
}

/* Report a DP adaptive-sync SDP mismatch and dump both SDPs. */
static void
pipe_config_dp_as_sdp_mismatch(struct drm_printer *p, bool fastset,
			       const struct intel_crtc *crtc,
			       const char *name,
			       const struct drm_dp_as_sdp *a,
			       const struct drm_dp_as_sdp *b)
{
	pipe_config_mismatch(p, fastset, crtc, name, "dp as sdp");

	drm_printf(p, "expected:\n");
	drm_dp_as_sdp_log(p, a);
	drm_printf(p, "found:\n");
	drm_dp_as_sdp_log(p, b);
}

/* Returns the length up to and including the last differing byte */
static size_t
memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
{
	int i;

	/* scan backwards so the first hit is the last difference */
	for (i = len - 1; i >= 0; i--) {
		if (a[i] != b[i])
			return i + 1;
	}

	return 0;
}

/* Report a buffer mismatch and hex-dump both buffers. */
static void
pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset,
			    const struct intel_crtc *crtc,
			    const char *name,
			    const u8 *a, const u8 *b, size_t len)
{
	pipe_config_mismatch(p, fastset, crtc, name, "buffer");

	/* only dump up to the last difference */
	len = memcmp_diff_len(a, b, len);

	drm_print_hex_dump(p, "expected: ", a, len);
	drm_print_hex_dump(p, "found: ", b, len);
}

/* Report a shared-DPLL hw state mismatch and dump both states. */
static void
pipe_config_pll_mismatch(struct drm_printer *p, bool fastset,
			 const struct intel_crtc *crtc,
			 const char *name,
			 const struct intel_dpll_hw_state *a,
			 const
 struct intel_dpll_hw_state *b)
{
	struct intel_display *display = to_intel_display(crtc);

	pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */

	drm_printf(p, "expected:\n");
	intel_dpll_dump_hw_state(display, p, a);
	drm_printf(p, "found:\n");
	intel_dpll_dump_hw_state(display, p, b);
}

/* Report a C10/C20 PHY PLL hw state mismatch and dump both states. */
static void
pipe_config_cx0pll_mismatch(struct drm_printer *p, bool fastset,
			    const struct intel_crtc *crtc,
			    const char *name,
			    const struct intel_cx0pll_state *a,
			    const struct intel_cx0pll_state *b)
{
	struct intel_display *display = to_intel_display(crtc);
	char *chipname = a->use_c10 ? "C10" : "C20";

	pipe_config_mismatch(p, fastset, crtc, name, chipname);

	drm_printf(p, "expected:\n");
	intel_cx0pll_dump_hw_state(display, a);
	drm_printf(p, "found:\n");
	intel_cx0pll_dump_hw_state(display, b);
}

static bool allow_vblank_delay_fastset(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(old_crtc_state);

	/*
	 * Allow fastboot to fix up vblank delay (handled via LRR
	 * codepaths), a bit dodgy as the registers aren't
	 * double buffered but seems to be working more or less...
 */
	return HAS_LRR(display) && old_crtc_state->inherited &&
		!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI);
}

/*
 * Compare two CRTC states field by field. With @fastset true only the
 * properties that would force a full modeset are checked and mismatches
 * are printed at KMS debug level; otherwise every checked field must
 * match and mismatches are printed as errors. Returns true when all
 * checked fields agree.
 */
bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state *pipe_config,
			  bool fastset)
{
	struct intel_display *display = to_intel_display(current_config);
	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_printer p;
	u32 exclude_infoframes = 0;
	bool ret = true;

	/* fastset mismatches are expected/benign -> debug; others are errors */
	if (fastset)
		p = drm_dbg_printer(&dev_priv->drm, DRM_UT_KMS, NULL);
	else
		p = drm_err_printer(&dev_priv->drm, NULL);

/* hex comparison of a non-bool scalar field */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* like PIPE_CONF_CHECK_X but only the bits in @mask are compared */
#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

/* decimal comparison of a non-bool integer field */
#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* comparison of a long long integer field */
#define PIPE_CONF_CHECK_LLI(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected %lli, found %lli)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* comparison of a bool field (enforced at compile time) */
#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
				 __stringify(name) " is not bool"); \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected %s, found %s)", \
				     str_yes_no(current_config->name), \
				     str_yes_no(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/* comparison of a pointer field */
#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* comparison of a link M/N value set */
#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name)) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected tu %i data %i/%i link %i/%i, " \
				     "found tu %i, data %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.data_m, \
				     current_config->name.data_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.data_m, \
				     pipe_config->name.data_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* comparison of shared-DPLL hw state */
#define PIPE_CONF_CHECK_PLL(name) do { \
	if (!intel_dpll_compare_hw_state(display, &current_config->name, \
					 &pipe_config->name)) { \
		pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \
					 &current_config->name, \
					 &pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* comparison of C10/C20 PHY PLL hw state */
#define PIPE_CONF_CHECK_PLL_CX0(name) do { \
	if (!intel_cx0pll_compare_hw_state(&current_config->name, \
					   &pipe_config->name)) { \
		pipe_config_cx0pll_mismatch(&p, fastset, crtc, __stringify(name), \
					    &current_config->name, \
					    &pipe_config->name); \
		ret = false; \
	} \
} while (0)

/*
 * comparison of full mode timings; some vertical timings are exempted
 * when fastset may fix them up via the vblank-delay/LRR paths
 */
#define PIPE_CONF_CHECK_TIMINGS(name) do { \
	PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
	PIPE_CONF_CHECK_I(name.crtc_htotal); \
	PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
	PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
	PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
	PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
	PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
	if (!fastset || !allow_vblank_delay_fastset(current_config)) \
		PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
	PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
	PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
	if (!fastset || !pipe_config->update_lrr) { \
		PIPE_CONF_CHECK_I(name.crtc_vtotal); \
		PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
	} \
} while (0)

/* comparison of a drm_rect */
#define PIPE_CONF_CHECK_RECT(name) do { \
	PIPE_CONF_CHECK_I(name.x1); \
	PIPE_CONF_CHECK_I(name.x2); \
	PIPE_CONF_CHECK_I(name.y1); \
	PIPE_CONF_CHECK_I(name.y2); \
} while (0)

/* comparison of flag bits selected by @mask */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(&p, fastset, crtc, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
	if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
				      &pipe_config->infoframes.name)) { \
		pipe_config_dp_vsc_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
						&current_config->infoframes.name, \
						&pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \
	if (!intel_compare_dp_as_sdp(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_dp_as_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

/* byte-wise comparison of a fixed-size buffer field */
#define PIPE_CONF_CHECK_BUFFER(name, len) do { \
	BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
	BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
	if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
		pipe_config_buffer_mismatch(&p, fastset, crtc, __stringify(name), \
					    current_config->name, \
					    pipe_config->name, \
					    (len)); \
		ret = false; \
	} \
} while (0)

/* LUTs are only comparable when both states use the same gamma mode */
#define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \
	if (current_config->gamma_mode == pipe_config->gamma_mode && \
	    !intel_color_lut_equal(current_config, \
				   current_config->lut, pipe_config->lut, \
				   is_pre_csc_lut)) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(lut), \
				     "hw_state doesn't match sw_state"); \
		ret = false; \
	} \
} while (0)

/* element-wise comparison of a CSC matrix (pre-offsets, 3x3, post-offsets) */
#define PIPE_CONF_CHECK_CSC(name) do { \
	PIPE_CONF_CHECK_X(name.preoff[0]); \
	PIPE_CONF_CHECK_X(name.preoff[1]); \
	PIPE_CONF_CHECK_X(name.preoff[2]); \
	PIPE_CONF_CHECK_X(name.coeff[0]); \
	PIPE_CONF_CHECK_X(name.coeff[1]); \
	PIPE_CONF_CHECK_X(name.coeff[2]); \
	PIPE_CONF_CHECK_X(name.coeff[3]); \
	PIPE_CONF_CHECK_X(name.coeff[4]); \
	PIPE_CONF_CHECK_X(name.coeff[5]); \
	PIPE_CONF_CHECK_X(name.coeff[6]); \
	PIPE_CONF_CHECK_X(name.coeff[7]); \
	PIPE_CONF_CHECK_X(name.coeff[8]); \
	PIPE_CONF_CHECK_X(name.postoff[0]); \
	PIPE_CONF_CHECK_X(name.postoff[1]); \
	PIPE_CONF_CHECK_X(name.postoff[2]); \
} while (0)

/* true if either state has the given quirk set */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_BOOL(hw.enable);
	PIPE_CONF_CHECK_BOOL(hw.active);

	PIPE_CONF_CHECK_I(cpu_transcoder);
	PIPE_CONF_CHECK_I(mst_master_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (HAS_DOUBLE_BUFFERED_M_N(display)) {
		/* M/N can be updated seamlessly, so skip on fastset M/N updates */
		if (!fastset || !pipe_config->update_m_n)
			PIPE_CONF_CHECK_M_N(dp_m_n);
	} else {
		PIPE_CONF_CHECK_M_N(dp_m_n);
		PIPE_CONF_CHECK_M_N(dp_m2_n2);
	}

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(framestart_delay);
	PIPE_CONF_CHECK_I(msa_timing_delay);

	PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
	PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);

	PIPE_CONF_CHECK_I(pixel_multiplier);

	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	PIPE_CONF_CHECK_BOOL(enhanced_framing);
	PIPE_CONF_CHECK_BOOL(fec_enable);

	if (!fastset) {
		PIPE_CONF_CHECK_BOOL(has_audio);
		PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (DISPLAY_VER(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	if (!fastset) {
		PIPE_CONF_CHECK_RECT(pipe_src);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		PIPE_CONF_CHECK_RECT(pch_pfit.dst);

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_I(pixel_rate);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (IS_CHERRYVIEW(dev_priv))
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);
		PIPE_CONF_CHECK_BOOL(wgc_enable);

		PIPE_CONF_CHECK_I(linetime);
		PIPE_CONF_CHECK_I(ips_linetime);

		PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true);
		PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false);

		PIPE_CONF_CHECK_CSC(csc);
		PIPE_CONF_CHECK_CSC(output_csc);
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	if (dev_priv->display.dpll.mgr)
		PIPE_CONF_CHECK_P(shared_dpll);

	/* FIXME convert everything over the dpll_mgr */
	if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv))
		PIPE_CONF_CHECK_PLL(dpll_hw_state);

	/* FIXME convert MTL+ platforms over to dpll_mgr */
	if (DISPLAY_VER(dev_priv) >= 14)
		PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	if (!fastset || !pipe_config->update_m_n) {
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
	}
	PIPE_CONF_CHECK_I(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

	/* PSR and VRR toggle some SDPs dynamically; exclude those bits */
	if (current_config->has_psr || pipe_config->has_psr)
		exclude_infoframes |= intel_hdmi_infoframe_enable(DP_SDP_VSC);

	if (current_config->vrr.enable || pipe_config->vrr.enable)
		exclude_infoframes |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);

	PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, ~exclude_infoframes);
	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	if (!fastset) {
		PIPE_CONF_CHECK_INFOFRAME(drm);
		PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
	}
	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);

	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
	PIPE_CONF_CHECK_I(master_transcoder);
	PIPE_CONF_CHECK_X(joiner_pipes);

	/* full DSC configuration comparison */
	PIPE_CONF_CHECK_BOOL(dsc.config.block_pred_enable);
	PIPE_CONF_CHECK_BOOL(dsc.config.convert_rgb);
	PIPE_CONF_CHECK_BOOL(dsc.config.simple_422);
	PIPE_CONF_CHECK_BOOL(dsc.config.native_422);
	PIPE_CONF_CHECK_BOOL(dsc.config.native_420);
	PIPE_CONF_CHECK_BOOL(dsc.config.vbr_enable);
	PIPE_CONF_CHECK_I(dsc.config.line_buf_depth);
	PIPE_CONF_CHECK_I(dsc.config.bits_per_component);
	PIPE_CONF_CHECK_I(dsc.config.pic_width);
	PIPE_CONF_CHECK_I(dsc.config.pic_height);
	PIPE_CONF_CHECK_I(dsc.config.slice_width);
	PIPE_CONF_CHECK_I(dsc.config.slice_height);
	PIPE_CONF_CHECK_I(dsc.config.initial_dec_delay);
	PIPE_CONF_CHECK_I(dsc.config.initial_xmit_delay);
	PIPE_CONF_CHECK_I(dsc.config.scale_decrement_interval);
	PIPE_CONF_CHECK_I(dsc.config.scale_increment_interval);
	PIPE_CONF_CHECK_I(dsc.config.initial_scale_value);
	PIPE_CONF_CHECK_I(dsc.config.first_line_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.flatness_min_qp);
	PIPE_CONF_CHECK_I(dsc.config.flatness_max_qp);
	PIPE_CONF_CHECK_I(dsc.config.slice_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.nfl_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.initial_offset);
	PIPE_CONF_CHECK_I(dsc.config.final_offset);
	PIPE_CONF_CHECK_I(dsc.config.rc_model_size);
	PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit0);
	PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit1);
	PIPE_CONF_CHECK_I(dsc.config.slice_chunk_size);
	PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);

	PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
	PIPE_CONF_CHECK_I(dsc.num_streams);
	PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);

	PIPE_CONF_CHECK_BOOL(splitter.enable);
	PIPE_CONF_CHECK_I(splitter.link_count);
	PIPE_CONF_CHECK_I(splitter.pixel_overlap);

	if (!fastset) {
		PIPE_CONF_CHECK_BOOL(vrr.enable);
		PIPE_CONF_CHECK_I(vrr.vmin);
		PIPE_CONF_CHECK_I(vrr.vmax);
		PIPE_CONF_CHECK_I(vrr.flipline);
		PIPE_CONF_CHECK_I(vrr.pipeline_full);
		PIPE_CONF_CHECK_I(vrr.guardband);
		PIPE_CONF_CHECK_I(vrr.vsync_start);
		PIPE_CONF_CHECK_I(vrr.vsync_end);
		PIPE_CONF_CHECK_LLI(cmrr.cmrr_m);
		PIPE_CONF_CHECK_LLI(cmrr.cmrr_n);
		PIPE_CONF_CHECK_BOOL(cmrr.enable);
	}

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_LLI
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_CHECK_TIMINGS
#undef PIPE_CONF_CHECK_RECT
#undef PIPE_CONF_QUIRK

	return ret;
}

/* Assert that every new plane state is either visible or a Y plane. */
static void
intel_verify_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane,
					  plane_state, i)
		assert_plane(plane, plane_state->is_y_plane ||
			     plane_state->uapi.visible);
}

/*
 * Flag a full modeset on @crtc_state for @reason, pulling the affected
 * connectors, DP tunnel state, MST topology state and planes into @state.
 */
static int intel_modeset_pipe(struct intel_atomic_state *state,
			      struct intel_crtc_state *crtc_state,
			      const char *reason)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int ret;

	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
		    crtc->base.base.id, crtc->base.name, reason);

	ret = drm_atomic_add_affected_connectors(&state->base,
						 &crtc->base);
	if (ret)
		return ret;

	ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc);
	if (ret)
		return ret;

	ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
	if (ret)
		return ret;

	ret = intel_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	crtc_state->uapi.mode_changed = true;

	return 0;
}

/**
 * intel_modeset_pipes_in_mask_early - force a full modeset on a set of pipes
 * @state: intel atomic state
 * @reason: the reason for the full modeset
 * @mask: mask of pipes to modeset
 *
 * Add pipes in @mask to @state and force a full modeset on the enabled ones
 * due to the description in @reason.
 * This function can be called only before new plane states are computed.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
				      const char *reason, u8 mask)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mask) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* skip disabled pipes and those already flagged for modeset */
		if (!crtc_state->hw.enable ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		ret = intel_modeset_pipe(state, crtc_state, reason);
		if (ret)
			return ret;
	}

	return 0;
}

/* Flag a full modeset and clear all fastset-only update flags. */
static void
intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.mode_changed = true;

	crtc_state->update_pipe = false;
	crtc_state->update_m_n = false;
	crtc_state->update_lrr = false;
}

/**
 * intel_modeset_all_pipes_late - force a full modeset on all pipes
 * @state: intel atomic state
 * @reason: the reason for the full modeset
 *
 * Add all pipes to @state and force a full modeset on the active ones due to
 * the description in @reason.
 * This function can be called only after new plane states are computed already.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
				 const char *reason)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* skip inactive pipes and those already flagged for modeset */
		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		ret = intel_modeset_pipe(state, crtc_state, reason);
		if (ret)
			return ret;

		intel_crtc_flag_modeset(crtc_state);

		/* plane states exist already: update all active planes, no async flips */
		crtc_state->update_planes |= crtc_state->active_planes;
		crtc_state->async_flip_planes = 0;
		crtc_state->do_async_flip = false;
	}

	return 0;
}

/*
 * Build and commit an internal atomic state that marks
 * connectors_changed on every pipe in @pipe_mask.
 * Returns 0 on success, negative error code otherwise.
 */
int intel_modeset_commit_pipes(struct drm_i915_private *i915,
			       u8 pipe_mask,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct intel_crtc *crtc;
	int ret;

	state = drm_atomic_state_alloc(&i915->drm);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	to_intel_atomic_state(state)->internal = true;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		crtc_state->uapi.connectors_changed = true;
	}

	ret = drm_atomic_commit(state);
out:
	drm_atomic_state_put(state);

	return ret;
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation.
 * When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/* point the newly-enabled pipe at the one it must wait on */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

/*
 * Fold the pipes whose hw.active changes in @state into @active_pipes
 * and return the resulting bitmask.
 */
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
			   u8 active_pipes)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);
		else
			active_pipes &= ~BIT(crtc->pipe);
	}

	return active_pipes;
}

/* Global checks once a full modeset is known to happen. */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	state->modeset = true;

	if (IS_HASWELL(dev_priv))
		return hsw_mode_set_planes_workaround(state);

	return 0;
}

/* True if any of the vertical timings the LRR path can update differ. */
static bool lrr_params_changed(const struct drm_display_mode *old_adjusted_mode,
			       const struct drm_display_mode *new_adjusted_mode)
{
	return old_adjusted_mode->crtc_vblank_start != new_adjusted_mode->crtc_vblank_start ||
		old_adjusted_mode->crtc_vblank_end != new_adjusted_mode->crtc_vblank_end ||
		old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal;
}

/*
 * Decide whether the old->new state transition can be done as a fastset,
 * clearing uapi.mode_changed if so and trimming the update_m_n/update_lrr
 * flags to what actually changed.
 */
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 5867 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 5868 5869 /* only allow LRR when the timings stay within the VRR range */ 5870 if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range) 5871 new_crtc_state->update_lrr = false; 5872 5873 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) { 5874 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n", 5875 crtc->base.base.id, crtc->base.name); 5876 } else { 5877 if (allow_vblank_delay_fastset(old_crtc_state)) 5878 new_crtc_state->update_lrr = true; 5879 new_crtc_state->uapi.mode_changed = false; 5880 } 5881 5882 if (intel_compare_link_m_n(&old_crtc_state->dp_m_n, 5883 &new_crtc_state->dp_m_n)) 5884 new_crtc_state->update_m_n = false; 5885 5886 if (!lrr_params_changed(&old_crtc_state->hw.adjusted_mode, 5887 &new_crtc_state->hw.adjusted_mode)) 5888 new_crtc_state->update_lrr = false; 5889 5890 if (intel_crtc_needs_modeset(new_crtc_state)) 5891 intel_crtc_flag_modeset(new_crtc_state); 5892 else 5893 new_crtc_state->update_pipe = true; 5894 } 5895 5896 static int intel_atomic_check_crtcs(struct intel_atomic_state *state) 5897 { 5898 struct intel_crtc_state __maybe_unused *crtc_state; 5899 struct intel_crtc *crtc; 5900 int i; 5901 5902 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5903 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 5904 int ret; 5905 5906 ret = intel_crtc_atomic_check(state, crtc); 5907 if (ret) { 5908 drm_dbg_atomic(&i915->drm, 5909 "[CRTC:%d:%s] atomic driver check failed\n", 5910 crtc->base.base.id, crtc->base.name); 5911 return ret; 5912 } 5913 } 5914 5915 return 0; 5916 } 5917 5918 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, 5919 u8 transcoders) 5920 { 5921 const struct intel_crtc_state *new_crtc_state; 5922 struct intel_crtc *crtc; 5923 int i; 5924 5925 for_each_new_intel_crtc_in_state(state, 
					 crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}

/* Does any enabled CRTC in @state on one of @pipes need a full modeset? */
static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
				     u8 pipes)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    pipes & BIT(crtc->pipe) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}

/*
 * Validate the joiner configuration of @primary_crtc and pull every
 * secondary CRTC into @state, copying the primary's modeset state onto it.
 */
static int intel_atomic_check_joiner(struct intel_atomic_state *state,
				     struct intel_crtc *primary_crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *primary_crtc_state =
		intel_atomic_get_new_crtc_state(state, primary_crtc);
	struct intel_crtc *secondary_crtc;

	if (!primary_crtc_state->joiner_pipes)
		return 0;

	/* sanity check */
	if (drm_WARN_ON(&i915->drm,
			primary_crtc->pipe != joiner_primary_pipe(primary_crtc_state)))
		return -EINVAL;

	/* all requested joiner pipes must exist on this platform */
	if (primary_crtc_state->joiner_pipes & ~joiner_pipes(i915)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Cannot act as joiner primary "
			    "(need 0x%x as pipes, only 0x%x possible)\n",
			    primary_crtc->base.base.id, primary_crtc->base.name,
			    primary_crtc_state->joiner_pipes, joiner_pipes(i915));
		return -EINVAL;
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
					 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
		struct intel_crtc_state *secondary_crtc_state;
		int ret;

		secondary_crtc_state = intel_atomic_get_crtc_state(&state->base, secondary_crtc);
		if (IS_ERR(secondary_crtc_state))
			return PTR_ERR(secondary_crtc_state);

		/* primary being enabled, secondary was already configured? */
		if (secondary_crtc_state->uapi.enable) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] secondary is enabled as normal CRTC, but "
				    "[CRTC:%d:%s] claiming this CRTC for joiner.\n",
				    secondary_crtc->base.base.id, secondary_crtc->base.name,
				    primary_crtc->base.base.id, primary_crtc->base.name);
			return -EINVAL;
		}

		/*
		 * The state copy logic assumes the primary crtc gets processed
		 * before the secondary crtc during the main compute_config loop.
		 * This works because the crtcs are created in pipe order,
		 * and the hardware requires primary pipe < secondary pipe as well.
		 * Should that change we need to rethink the logic.
		 */
		if (WARN_ON(drm_crtc_index(&primary_crtc->base) >
			    drm_crtc_index(&secondary_crtc->base)))
			return -EINVAL;

		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Used as secondary for joiner primary [CRTC:%d:%s]\n",
			    secondary_crtc->base.base.id, secondary_crtc->base.name,
			    primary_crtc->base.base.id, primary_crtc->base.name);

		secondary_crtc_state->joiner_pipes =
			primary_crtc_state->joiner_pipes;

		ret = copy_joiner_crtc_state_modeset(state, secondary_crtc);
		if (ret)
			return ret;
	}

	return 0;
}

/* Break the joiner link between @primary_crtc and all of its secondaries. */
static void kill_joiner_secondaries(struct intel_atomic_state *state,
				    struct intel_crtc *primary_crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *primary_crtc_state =
		intel_atomic_get_new_crtc_state(state, primary_crtc);
	struct intel_crtc *secondary_crtc;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
					 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
		struct intel_crtc_state *secondary_crtc_state =
			intel_atomic_get_new_crtc_state(state, secondary_crtc);

		secondary_crtc_state->joiner_pipes = 0;

		intel_crtc_copy_uapi_to_hw_state_modeset(state, secondary_crtc);
	}

	primary_crtc_state->joiner_pipes = 0;
}

/**
 * DOC: asynchronous flip implementation
 *
 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
 * Correspondingly, support is currently added for primary plane only.
 *
 * Async flip can only change the plane surface address, so anything else
 * changing is rejected from the intel_async_flip_check_hw() function.
 * Once this check is cleared, flip done interrupt is enabled using
 * the intel_crtc_enable_flip_done() function.
 *
 * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to the userspace in the interrupt
 * handler itself. The timestamp and sequence sent during the flip done event
 * correspond to the last vblank and have no relation to the actual time when
 * the flip done event was sent.
 */

/*
 * Validate an async flip request against the uapi state, before
 * compute_config has run.
 */
static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	if (!new_crtc_state->uapi.async_flip)
		return 0;

	if (!new_crtc_state->uapi.active) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] not active\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] modeset required\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	/*
	 * FIXME: joiner+async flip is busted currently.
	 * Remove this check once the issues are fixed.
	 */
	if (new_crtc_state->joiner_pipes) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] async flip disallowed with joiner\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * TODO: Async flip is only supported through the page flip IOCTL
		 * as of now. So support currently added for primary plane only.
		 * Support for other planes on platforms on which supports
		 * this(vlv/chv and icl+) should be added when async flip is
		 * enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] async flip not supported\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] no old or new framebuffer\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Validate an async flip request against the computed hw state: only the
 * plane surface address may change, everything else must stay identical
 * between the old and new states.
 */
static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	int i;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->uapi.async_flip)
		return 0;

	if (!new_crtc_state->hw.active) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] not active\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] modeset required\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Active planes cannot be in async flip\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * Only async flip capable planes should be in the state
		 * if we're really about to ask the hardware to perform
		 * an async flip. We should never get this far otherwise.
		 */
		if (drm_WARN_ON(&i915->drm,
				new_crtc_state->do_async_flip && !plane->async_flip))
			return -EINVAL;

		/*
		 * Only check async flip capable planes other planes
		 * may be involved in the initial commit due to
		 * the wm0/ddb optimization.
		 *
		 * TODO maybe should track which planes actually
		 * were requested to do the async flip...
		 */
		if (!plane->async_flip)
			continue;

		if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->modifier)) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
				    plane->base.base.id, plane->base.name,
				    new_plane_state->hw.fb->modifier);
			return -EINVAL;
		}

		if (intel_format_info_is_yuv_semiplanar(new_plane_state->hw.fb->format,
							new_plane_state->hw.fb->modifier)) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Planar formats do not support async flips\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		/*
		 * We turn the first async flip request into a sync flip
		 * so that we can reconfigure the plane (eg. change modifier).
		 */
		if (!new_crtc_state->do_async_flip)
			continue;

		if (old_plane_state->view.color_plane[0].mapping_stride !=
		    new_plane_state->view.color_plane[0].mapping_stride) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (skl_plane_aux_dist(old_plane_state, 0) !=
		    skl_plane_aux_dist(new_plane_state, 0)) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] AUX_DIST cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		/* NOTE(review): "[PLANES:" below looks like a typo for "[PLANE:" */
		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm,
				    "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		/* plane decryption is allow to change only in synchronous flips */
		if (old_plane_state->decrypt != new_plane_state->decrypt) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Pull into @state every CRTC that participates in a joiner configuration
 * affected by this commit, and propagate modesets across joined pipes.
 */
static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_crtc_state *crtc_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	u8 affected_pipes = 0;
	u8 modeset_pipes = 0;
	int i;

	/*
	 * Any plane which is in use by the joiner needs its crtc.
	 * Pull those in first as this will not have happened yet
	 * if the plane remains disabled according to uapi.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		crtc = to_intel_crtc(plane_state->hw.crtc);
		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	/* Now pull in all joined crtcs */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		affected_pipes |= crtc_state->joiner_pipes;
		if (intel_crtc_needs_modeset(crtc_state))
			modeset_pipes |= crtc_state->joiner_pipes;
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	/* Joined pipes of a CRTC needing a modeset need one too. */
	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
		int ret;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old joiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    intel_crtc_is_joiner_primary(crtc_state))
			kill_joiner_secondaries(state, crtc);
	}

	return 0;
}

/*
 * Run compute_config for every CRTC in @state that needs a modeset.
 * On failure *@failed_pipe reports the pipe whose computation failed.
 */
static int intel_atomic_check_config(struct intel_atomic_state *state,
				     struct intel_link_bw_limits *limits,
				     enum pipe *failed_pipe)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int ret;
	int i;

	*failed_pipe = INVALID_PIPE;

	ret = intel_joiner_add_affected_crtcs(state);
	if (ret)
		return ret;

	ret = intel_fdi_add_affected_crtcs(state);
	if (ret)
		return ret;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			if (intel_crtc_is_joiner_secondary(new_crtc_state))
				copy_joiner_crtc_state_nomodeset(state, crtc);
			else
				intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
			continue;
		}

		/* joiner secondaries get their state copied from the primary */
		if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
			continue;

		ret = intel_crtc_prepare_cleared_state(state, crtc);
		if (ret)
			goto fail;

		if (!new_crtc_state->hw.enable)
			continue;

		ret = intel_modeset_pipe_config(state, crtc, limits);
		if (ret)
			goto fail;
	}

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
			continue;

		if (!new_crtc_state->hw.enable)
			continue;

		ret = intel_modeset_pipe_config_late(state, crtc);
		if (ret)
			goto fail;
	}

fail:
	/* also reached on success (ret == 0); only record the pipe on error */
	if (ret)
		*failed_pipe = crtc->pipe;

	return ret;
}

/*
 * Iterate compute_config and the link bandwidth check, reducing the failing
 * pipe's bpp limit each round until a configuration fits the available link
 * bandwidth or a hard error occurs.
 */
static int intel_atomic_check_config_and_link(struct intel_atomic_state *state)
{
	struct intel_link_bw_limits new_limits;
	struct intel_link_bw_limits old_limits;
	int ret;

	intel_link_bw_init_limits(state, &new_limits);
	old_limits = new_limits;

	while (true) {
		enum pipe failed_pipe;

		ret = intel_atomic_check_config(state, &new_limits,
						&failed_pipe);
		if (ret) {
			/*
			 * The bpp limit for a pipe is below the minimum it supports, set the
			 * limit to the minimum and recalculate the config.
			 */
			if (ret == -EINVAL &&
			    intel_link_bw_set_bpp_limit_for_pipe(state,
								 &old_limits,
								 &new_limits,
								 failed_pipe))
				continue;

			break;
		}

		old_limits = new_limits;

		/* -EAGAIN means the limits changed; recompute the config */
		ret = intel_link_bw_atomic_check(state, &new_limits);
		if (ret != -EAGAIN)
			break;
	}

	return ret;
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @_state: state to validate
 */
int intel_atomic_check(struct drm_device *dev,
		       struct drm_atomic_state *_state)
{
	struct intel_display *display = to_intel_display(dev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;
	bool any_ms = false;

	if (!intel_display_driver_check_access(display))
		return -ENODEV;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * crtc's state no longer considered to be inherited
		 * after the first userspace/client initiated commit.
		 */
		if (!state->internal)
			new_crtc_state->inherited = false;

		if (new_crtc_state->inherited != old_crtc_state->inherited)
			new_crtc_state->uapi.mode_changed = true;

		if (new_crtc_state->uapi.scaling_filter !=
		    old_crtc_state->uapi.scaling_filter)
			new_crtc_state->uapi.mode_changed = true;
	}

	intel_vrr_check_modeset(state);

	ret = drm_atomic_helper_check_modeset(dev, &state->base);
	if (ret)
		goto fail;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = intel_async_flip_check_uapi(state, crtc);
		/* NOTE(review): returns directly instead of goto fail (no state dump) */
		if (ret)
			return ret;
	}

	ret = intel_atomic_check_config_and_link(state);
	if (ret)
		goto fail;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (intel_crtc_is_joiner_secondary(new_crtc_state)) {
			drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
			continue;
		}

		ret = intel_atomic_check_joiner(state, crtc);
		if (ret)
			goto fail;
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		intel_joiner_adjust_pipe_src(new_crtc_state);

		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
	}

	/*
	 * Check if fastset is allowed by external dependencies like other
	 * pipes and transcoders.
	 *
	 * Right now it only forces a full modeset when the MST master
	 * transcoder did not change but the pipe of the master transcoder
	 * needs a full modeset, so all slaves also need to do a full modeset;
	 * or in case of port synced crtcs, if one of the synced crtcs
	 * needs a full modeset, all other synced crtcs should be
	 * forced a full modeset.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (intel_dp_mst_crtc_needs_modeset(state, crtc))
			intel_crtc_flag_modeset(new_crtc_state);

		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
			enum transcoder master = new_crtc_state->mst_master_transcoder;

			if (intel_cpu_transcoders_need_modeset(state, BIT(master)))
				intel_crtc_flag_modeset(new_crtc_state);
		}

		if (is_trans_port_sync_mode(new_crtc_state)) {
			u8 trans = new_crtc_state->sync_mode_slaves_mask;

			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
				trans |= BIT(new_crtc_state->master_transcoder);

			if (intel_cpu_transcoders_need_modeset(state, trans))
				intel_crtc_flag_modeset(new_crtc_state);
		}

		if (new_crtc_state->joiner_pipes) {
			if (intel_pipes_need_modeset(state, new_crtc_state->joiner_pipes))
				intel_crtc_flag_modeset(new_crtc_state);
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		any_ms = true;

		intel_release_shared_dplls(state, crtc);
	}

	if (any_ms && !check_digital_port_conflicts(state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "rejecting conflicting digital port configuration\n");
		ret = -EINVAL;
		goto fail;
	}

	ret = intel_atomic_check_planes(state);
	if (ret)
		goto fail;

	ret = intel_compute_global_watermarks(state);
	if (ret)
		goto fail;

	ret = intel_bw_atomic_check(state);
	if (ret)
		goto fail;

	ret = intel_cdclk_atomic_check(state, &any_ms);
	if (ret)
		goto fail;

	if (intel_any_crtc_needs_modeset(state))
		any_ms = true;

	if (any_ms) {
		ret = intel_modeset_checks(state);
		if (ret)
			goto fail;

		ret = intel_modeset_calc_cdclk(state);
		/* NOTE(review): returns directly instead of goto fail (no state dump) */
		if (ret)
			return ret;
	}

	ret = intel_pmdemand_atomic_check(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check_crtcs(state);
	if (ret)
		goto fail;

	ret = intel_fbc_atomic_check(state);
	if (ret)
		goto fail;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		intel_color_assert_luts(new_crtc_state);

		ret = intel_async_flip_check_hw(state, crtc);
		if (ret)
			goto fail;

		/* Either full modeset or fastset (or neither), never both */
		drm_WARN_ON(&dev_priv->drm,
			    intel_crtc_needs_modeset(new_crtc_state) &&
			    intel_crtc_needs_fastset(new_crtc_state));

		if (!intel_crtc_needs_modeset(new_crtc_state) &&
		    !intel_crtc_needs_fastset(new_crtc_state))
			continue;

		intel_crtc_state_dump(new_crtc_state, state,
				      intel_crtc_needs_modeset(new_crtc_state) ?
				      "modeset" : "fastset");
	}

	return 0;

fail:
	/* deadlock means the whole transaction will be retried; don't dump */
	if (ret == -EDEADLK)
		return ret;

	/*
	 * FIXME would probably be nice to know which crtc specifically
	 * caused the failure, in cases where we can pinpoint it.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_crtc_state_dump(new_crtc_state, state, "failed");

	return ret;
}

/* Prepare (pin etc.) the planes' framebuffers via the DRM helper. */
static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * intel_crtc_arm_fifo_underrun - enable FIFO underrun reporting for @crtc
 * @crtc: the CRTC
 * @crtc_state: the new CRTC state
 *
 * Enables CPU (and, for PCH encoders, PCH) FIFO underrun reporting.
 */
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);

	/*
	 * Skipped on display ver 2 when no planes are active —
	 * presumably such pipes would otherwise report spurious
	 * underruns there; TODO confirm.
	 */
	if (DISPLAY_VER(display) != 2 || crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, true);

	if (crtc_state->has_pch_encoder) {
		enum pipe pch_transcoder =
			intel_crtc_pch_transcoder(crtc);

		intel_set_pch_fifo_underrun_reporting(display, pch_transcoder, true);
	}
}

/* Apply pipe-level state that can be updated without a full modeset. */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (new_crtc_state->update_m_n)
		intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
					       &new_crtc_state->dp_m_n);

	if (new_crtc_state->update_lrr)
		intel_set_transcoder_timings_lrr(new_crtc_state);
}

/*
 * Pipe updates that must happen before the planes are armed.
 * Non-DSB path only.
 */
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/* DSB commits take a different path, see intel_update_crtc() */
	drm_WARN_ON(&dev_priv->drm, new_crtc_state->use_dsb);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (intel_crtc_needs_color_update(new_crtc_state))
			intel_color_commit_arm(NULL, new_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipe_misc(NULL, new_crtc_state);

		if (intel_crtc_needs_fastset(new_crtc_state))
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	intel_psr2_program_trans_man_trk_ctl(NULL, new_crtc_state);

	intel_atomic_update_watermarks(state, crtc);
}

/*
 * Pipe updates that must happen after the planes are armed.
 * Non-DSB path only.
 */
static void commit_pipe_post_planes(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	drm_WARN_ON(&dev_priv->drm, new_crtc_state->use_dsb);

	/*
	 * Disable the scaler(s) after the plane(s) so that we don't
	 * get a catastrophic underrun even if the two operations
	 * end up happening in two different frames.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 &&
	    !intel_crtc_needs_modeset(new_crtc_state))
		skl_detach_scalers(new_crtc_state);

	if (intel_crtc_vrr_enabling(state, crtc))
		intel_vrr_enable(new_crtc_state);
}

/* Enable @crtc (and its joined pipes) as part of a full modeset. */
static void intel_enable_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_crtc *pipe_crtc;

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return;

	for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
						 intel_crtc_joined_pipe_mask(new_crtc_state)) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		/* VRR will be enable later, if required */
		intel_crtc_update_active_timings(pipe_crtc_state, false);
	}

	dev_priv->display.funcs.display->crtc_enable(state, crtc);

	/* vblanks work again, re-enable pipe CRC. */
	intel_crtc_enable_pipe_crc(crtc);
}

/*
 * Per-CRTC preparation done before arming the update: DPT configuration,
 * LUT preloading, encoder/pipe fastset updates, and the "noarm" halves of
 * the color and plane programming.
 */
static void intel_pre_update_crtc(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	if (old_crtc_state->inherited ||
	    intel_crtc_needs_modeset(new_crtc_state)) {
		if (HAS_DPT(i915))
			intel_dpt_configure(crtc);
	}

	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    intel_crtc_needs_color_update(new_crtc_state))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (intel_crtc_needs_fastset(new_crtc_state))
			intel_encoders_update_pipe(state, crtc);

		if (DISPLAY_VER(i915) >= 11 &&
		    intel_crtc_needs_fastset(new_crtc_state))
			icl_set_pipe_chicken(new_crtc_state);

		if (vrr_params_changed(old_crtc_state, new_crtc_state) ||
		    cmrr_params_changed(old_crtc_state, new_crtc_state))
			intel_vrr_set_transcoder_timings(new_crtc_state);
	}

	intel_fbc_update(state, crtc);

	drm_WARN_ON(display->drm, !intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF));

	if (!modeset &&
	    intel_crtc_needs_color_update(new_crtc_state) &&
	    !new_crtc_state->use_dsb)
		intel_color_commit_noarm(NULL, new_crtc_state);

	if (!new_crtc_state->use_dsb)
		intel_crtc_planes_update_noarm(NULL, state, crtc);
}

/*
 * Arm the plane/pipe update for @crtc: either by kicking off the pre-built
 * DSB, or directly under vblank evasion.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state
		*new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state->use_dsb) {
		/* hardware (DSB) performs the commit; CPU only queues it */
		intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->dsb_event);

		intel_dsb_commit(new_crtc_state->dsb_commit, false);
	} else {
		/* Perform vblank evasion around commit operation */
		intel_pipe_update_start(state, crtc);

		if (new_crtc_state->dsb_commit)
			intel_dsb_commit(new_crtc_state->dsb_commit, false);

		commit_pipe_pre_planes(state, crtc);

		intel_crtc_planes_update_arm(NULL, state, crtc);

		commit_pipe_post_planes(state, crtc);

		intel_pipe_update_end(state, crtc);
	}

	/*
	 * VRR/Seamless M/N update may need to update frame timings.
	 *
	 * FIXME Should be synchronized with the start of vblank somehow...
	 */
	if (intel_crtc_vrr_enabling(state, crtc) ||
	    new_crtc_state->update_m_n || new_crtc_state->update_lrr)
		intel_crtc_update_active_timings(new_crtc_state,
						 new_crtc_state->vrr.enable);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets. But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (intel_crtc_needs_fastset(new_crtc_state) &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}

/*
 * Fully disable a CRTC and all pipes joined to it: pipe CRC off first
 * (to avoid racing vblank off), then the hw disable hook, then FBC and
 * (if the pipe stays off) initial watermarks for each joined pipe.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc *pipe_crtc;

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
					 intel_crtc_joined_pipe_mask(old_crtc_state))
		intel_crtc_disable_pipe_crc(pipe_crtc);

	dev_priv->display.funcs.display->crtc_disable(state, crtc);

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
					 intel_crtc_joined_pipe_mask(old_crtc_state)) {
		const struct intel_crtc_state *new_pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		pipe_crtc->active = false;
		intel_fbc_disable(pipe_crtc);

		/* program intermediate watermarks only for pipes staying off */
		if (!new_pipe_crtc_state->hw.active)
			intel_initial_watermarks(state, pipe_crtc);
	}
}

/*
 * Disable every pipe that needs a full modeset and was previously
 * active. Ordering: planes first, then port sync / MST slave
 * transcoders, then everything else (see inline comments).
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u8 disable_pipes = 0;	/* mask of pipes still awaiting disable */
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		/*
		 * Needs to be done even for pipes
		 * that weren't enabled previously.
		 */
		intel_pre_plane_update(state, crtc);

		if (!old_crtc_state->hw.active)
			continue;

		disable_pipes |= BIT(crtc->pipe);
	}

	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
		if ((disable_pipes & BIT(crtc->pipe)) == 0)
			continue;

		intel_crtc_disable_planes(state, crtc);

		/* flush any pending vblank works before turning vblank off */
		drm_vblank_work_flush_all(&crtc->base);
	}

	/* Only disable port sync and MST slaves */
	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
		if ((disable_pipes & BIT(crtc->pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(old_crtc_state))
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_old_crtc_state_disables(state, crtc);

		disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
	}

	/* Disable everything else left on */
	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
		if ((disable_pipes & BIT(crtc->pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(old_crtc_state))
			continue;

		intel_old_crtc_state_disables(state, crtc);

		disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
	}

	/* every pipe marked for disable must have been handled above */
	drm_WARN_ON(&i915->drm, disable_pipes);
}

/*
 * Non-skl modeset enable: bring up and pre-update all active pipes,
 * then arm the actual updates in a second pass.
 */
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		intel_enable_crtc(state, crtc);
		intel_pre_update_crtc(state, crtc);
	}

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		intel_update_crtc(state, crtc);
	}
}

/*
 * skl+ modeset enable: like intel_commit_modeset_enables() but orders
 * the per-pipe updates so that DDB (data buffer) allocations of
 * different pipes never overlap at any point in time (see comments
 * below); otherwise pipe underruns would result.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	/* DDB entries already committed to the hardware, per pipe */
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		intel_pre_update_crtc(state, crtc);
	}

	intel_dbuf_mbus_pre_ddb_update(state);

	while (update_pipes) {
		/*
		 * Commit in reverse order to make joiner primary
		 * send the uapi events after secondaries are done.
		 */
		for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
							    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* defer this pipe until its new DDB no longer overlaps */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_crtc_wait_for_next_vblank(crtc);
		}
	}

	intel_dbuf_mbus_post_ddb_update(state);

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(new_crtc_state))
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state))
			continue;

		modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(new_crtc_state))
			continue;

		modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		intel_pre_update_crtc(state, crtc);
	}

	/*
	 * Commit in reverse order to make joiner primary
	 * send the uapi events after secondaries are done.
	 */
	for_each_new_intel_crtc_in_state_reverse(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		/* freshly modeset pipes must never overlap committed DDB entries */
		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* all pipes must have been handled by the passes above */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}

/*
 * Wait (with i915's fence timeout) for every new plane's fence to
 * signal. Stops early on the first timeout or error; signalled fences
 * are dropped so they won't be waited on again.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int ret, i;

	for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
		if (new_plane_state->fence) {
			ret = dma_fence_wait_timeout(new_plane_state->fence, false,
						     i915_fence_timeout(i915));
			/* 0 = timeout, <0 = error: give up on the remaining fences */
			if (ret <= 0)
				break;

			dma_fence_put(new_plane_state->fence);
			new_plane_state->fence = NULL;
		}
	}
}

/* Wait for this CRTC's commit DSB and any color commit to complete. */
static void intel_atomic_dsb_wait_commit(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->dsb_commit)
		intel_dsb_wait(crtc_state->dsb_commit);

	intel_color_wait_commit(crtc_state);
}

/* Release this CRTC's commit DSB and color commit resources. */
static void intel_atomic_dsb_cleanup(struct
intel_crtc_state *crtc_state)
{
	if (crtc_state->dsb_commit) {
		intel_dsb_cleanup(crtc_state->dsb_commit);
		crtc_state->dsb_commit = NULL;
	}

	intel_color_cleanup_commit(crtc_state);
}

/*
 * Deferred cleanup of a committed atomic state: DSBs, plane state and
 * the state reference itself. Runs from the display cleanup workqueue
 * so it doesn't block the committing task.
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, cleanup_work);
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/* DSBs were moved new->old at the end of commit_tail */
	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
		intel_atomic_dsb_cleanup(old_crtc_state);

	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
	drm_atomic_helper_commit_cleanup_done(&state->base);
	drm_atomic_state_put(&state->base);
}

/*
 * For every plane with an RC CCS clear-color plane, read the 8-byte
 * native clear color value out of the fb into plane_state->ccval.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int cc_plane;
		int ret;

		if (!fb)
			continue;

		cc_plane = intel_fb_rc_ccs_cc_plane(fb);
		if (cc_plane < 0)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at
		 * offset 0 of cc plane, plane #2 previous generations or
		 * plane #1 for flat ccs):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		ret = intel_bo_read_from_page(intel_fb_bo(fb),
					      fb->offsets[cc_plane] + 16,
					      &plane_state->ccval,
					      sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}

/*
 * Decide whether this CRTC's commit can be executed by the DSB
 * (Display State Buffer) hardware, and kick off color commit
 * preparation. DSB is only used for plane-only updates (no modeset,
 * fastset, async flip or scaler use; PSR excluded before display 20).
 */
static void intel_atomic_dsb_prepare(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->hw.active)
		return;

	if (state->base.legacy_cursor_update)
		return;

	/* FIXME deal with everything */
	new_crtc_state->use_dsb =
		new_crtc_state->update_planes &&
		!new_crtc_state->do_async_flip &&
		(DISPLAY_VER(display) >= 20 || !new_crtc_state->has_psr) &&
		!new_crtc_state->scaler_state.scaler_users &&
		!old_crtc_state->scaler_state.scaler_users &&
		!intel_crtc_needs_modeset(new_crtc_state) &&
		!intel_crtc_needs_fastset(new_crtc_state);

	intel_color_prepare_commit(state, crtc);
}

/*
 * Build the commit DSB for this CRTC: record the noarm/arm register
 * writes, vblank evasion and (if needed) the VRR push into a DSB
 * program that the hardware will execute at commit time.
 */
static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->use_dsb && !new_crtc_state->dsb_color_vblank)
		return;

	/*
	 * Rough estimate:
	 * ~64 registers per each plane * 8 planes = 512
	 * Double that for pipe stuff and other overhead.
	 */
	new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0,
						       new_crtc_state->use_dsb ?
						       1024 : 16);
	/* allocation failure: fall back to a CPU-driven commit */
	if (!new_crtc_state->dsb_commit) {
		new_crtc_state->use_dsb = false;
		intel_color_cleanup_commit(new_crtc_state);
		return;
	}

	if (new_crtc_state->use_dsb) {
		if (intel_crtc_needs_color_update(new_crtc_state))
			intel_color_commit_noarm(new_crtc_state->dsb_commit,
						 new_crtc_state);
		intel_crtc_planes_update_noarm(new_crtc_state->dsb_commit,
					       state, crtc);

		/*
		 * Ensure we have "Frame Change" event when PSR state is
		 * SRDENT(PSR1) or DEEP_SLEEP(PSR2). Otherwise DSB vblank
		 * evasion hangs as PIPEDSL is reading as 0.
		 */
		intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit,
						     state, crtc);

		intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);

		if (intel_crtc_needs_color_update(new_crtc_state))
			intel_color_commit_arm(new_crtc_state->dsb_commit,
					       new_crtc_state);
		bdw_set_pipe_misc(new_crtc_state->dsb_commit,
				  new_crtc_state);
		intel_psr2_program_trans_man_trk_ctl(new_crtc_state->dsb_commit,
						     new_crtc_state);
		intel_crtc_planes_update_arm(new_crtc_state->dsb_commit,
					     state, crtc);

		if (!new_crtc_state->dsb_color_vblank) {
			intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);

			intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
			intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
			intel_vrr_check_push_sent(new_crtc_state->dsb_commit, new_crtc_state);
			intel_dsb_interrupt(new_crtc_state->dsb_commit);
		}
	}

	/* chain the vblank color DSB so it runs after the commit DSB */
	if (new_crtc_state->dsb_color_vblank)
		intel_dsb_chain(state, new_crtc_state->dsb_commit,
				new_crtc_state->dsb_color_vblank, true);

	intel_dsb_finish(new_crtc_state->dsb_commit);
}

/*
 * The tail of an atomic commit: performs the actual hardware update
 * for the whole state (disables, modeset enables, plane updates,
 * watermarks, power domains) and schedules the deferred cleanup.
 * Runs either inline (blocking commits) or from a commit worker.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = NULL;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_atomic_dsb_prepare(state, crtc);

	intel_atomic_commit_fence_wait(state);

	intel_td_flush(dev_priv);

	intel_atomic_prepare_plane_clear_colors(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_atomic_dsb_finish(state, crtc);

	drm_atomic_helper_wait_for_dependencies(&state->base);
	drm_dp_mst_atomic_wait_for_dependencies(&state->base);
	intel_atomic_global_state_wait_for_dependencies(state);

	/*
	 * During full modesets we write a lot of registers, wait
	 * for PLLs, etc. Doing that while DC states are enabled
	 * is not a good idea.
	 *
	 * During fastsets and other updates we also need to
	 * disable DC states due to the following scenario:
	 * 1. DC5 exit and PSR exit happen
	 * 2. Some or all _noarm() registers are written
	 * 3. Due to some long delay PSR is re-entered
	 * 4. DC5 entry -> DMC saves the already written new
	 *    _noarm() registers and the old not yet written
	 *    _arm() registers
	 * 5. DC5 exit -> DMC restores a mixture of old and
	 *    new register values and arms the update
	 * 6. PSR exit -> hardware latches a mixture of old and
	 *    new register values -> corrupted frame, or worse
	 * 7. New _arm() registers are finally written
	 * 8. Hardware finally latches a complete set of new
	 *    register values, and subsequent frames will be OK again
	 *
	 * Also note that due to the pipe CSC hardware issues on
	 * SKL/GLK DC states must remain off until the pipe CSC
	 * state readout has happened. Otherwise we risk corrupting
	 * the CSC latched register values with the readout (see
	 * skl_read_csc() and skl_color_commit_noarm()).
	 */
	wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    intel_crtc_needs_fastset(new_crtc_state))
			intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
	}

	intel_commit_modeset_disables(state);

	intel_dp_tunnel_atomic_alloc_bw(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	/*
	 * In XE_LPD+ Pmdemand combines many parameters such as voltage index,
	 * plls, cdclk frequency, QGV point selection parameter etc. Voltage
	 * index, cdclk/ddiclk frequencies are supposed to be configured before
	 * the cdclk config is set.
	 */
	intel_pmdemand_pre_plane_update(state);

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.funcs.display->commit_modeset_enables(state);

	intel_program_dpkgc_latency(state);

	if (state->modeset)
		intel_set_cdclk_post_plane_update(state);

	intel_wait_for_vblank_workers(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_disable_flip_done(state, crtc);

		intel_atomic_dsb_wait_commit(new_crtc_state);

		if (!state->base.legacy_cursor_update && !new_crtc_state->use_dsb)
			intel_vrr_check_push_sent(NULL, new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, true);

		intel_optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(state, crtc);

		intel_post_plane_update_after_readout(state, crtc);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 *
		 * FIXME get rid of this funny new->old swapping
		 */
		old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank);
		old_crtc_state->dsb_commit = fetch_and_zero(&new_crtc_state->dsb_commit);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(display);
	intel_check_pch_fifo_underruns(display);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);
	intel_pmdemand_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);
	intel_atomic_global_state_commit_done(state);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
	}
	/*
	 * Delay re-enabling DC states by 17 ms to avoid the off->on->off
	 * toggling overhead at and above 60 FPS.
	 */
	intel_display_power_put_async_delay(display, POWER_DOMAIN_DC_OFF, wakeref, 17);
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->cleanup_work, intel_atomic_cleanup_work);
	queue_work(dev_priv->display.wq.cleanup, &state->cleanup_work);
}

/* Worker entry point for nonblocking commits: just run the tail. */
static void intel_atomic_commit_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);

	intel_atomic_commit_tail(state);
}

/*
 * Move frontbuffer tracking from each plane's old fb to its new fb
 * for the whole state.
 */
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}

/*
 * Set up commit tracking for the drm core and for i915's global
 * state. Returns 0 or a negative error code.
 */
static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock)
{
	int ret;

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (ret)
		return ret;

	ret = intel_atomic_global_state_setup_commit(state);
	if (ret)
		return ret;

	return 0;
}

/*
 * Swap the new state into the objects (drm core, global state, DPLLs)
 * and update frontbuffer tracking. Returns 0 or a negative error code.
 */
static int intel_atomic_swap_state(struct intel_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_swap_state(&state->base, true);
	if (ret)
		return ret;

	intel_atomic_swap_global_state(state);

	intel_shared_dpll_swap_state(state);

	intel_atomic_track_fbs(state);

	return 0;
}

/*
 * i915's drm_mode_config_funcs.atomic_commit hook: prepare, swap and
 * either run the commit tail inline (blocking) or queue it on the
 * modeset/flip workqueue (nonblocking). Returns 0 or a negative error
 * code; on failure the runtime PM reference is released again.
 */
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
			bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = intel_atomic_setup_commit(state, nonblock);
	if (!ret)
		ret = intel_atomic_swap_state(state);

	if (ret) {
		drm_atomic_helper_unprepare_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	if (nonblock && state->modeset) {
		queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
	} else {
		/* serialize against pending nonblocking modesets first */
		if (state->modeset)
			flush_workqueue(dev_priv->display.wq.modeset);
		intel_atomic_commit_tail(state);
	}

	return 0;
}

/*
 * Return the mask of encoders this encoder can be cloned with
 * (including itself, since encoders_cloneable() is checked against
 * every encoder on the device).
 */
static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	u32 possible_clones = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			possible_clones |= drm_encoder_mask(&source_encoder->base);
	}

	return possible_clones;
}

/* Return the CRTC mask corresponding to the encoder's pipe mask. */
static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc;
	u32 possible_crtcs = 0;

	for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
		possible_crtcs |= drm_crtc_mask(&crtc->base);

	return possible_crtcs;
}

/*
 * Detect eDP on port A on ILK-era hardware: mobile only, DP_A strap
 * must be set, and on Ironlake the eDP-A fuse must not disable it.
 */
static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/*
 * Whether a CRT (VGA) connector can be present on a DDI platform:
 * ruled out by display version, ULT variants, fuse straps, DDI A
 * lane config and the VBT.
 */
static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9)
		return false;

	if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->display.vbt.int_crt_support)
		return false;

	return true;
}

/*
 * WARN (and return false) if @port is not present in the platform's
 * runtime port mask.
 */
bool assert_port_valid(struct intel_display *display, enum port port)
{
	return !drm_WARN(display->drm,
!(DISPLAY_RUNTIME_INFO(display)->port_mask & BIT(port)), 7831 "Platform does not support port %c\n", port_name(port)); 7832 } 7833 7834 void intel_setup_outputs(struct drm_i915_private *dev_priv) 7835 { 7836 struct intel_display *display = &dev_priv->display; 7837 struct intel_encoder *encoder; 7838 bool dpd_is_edp = false; 7839 7840 intel_pps_unlock_regs_wa(display); 7841 7842 if (!HAS_DISPLAY(dev_priv)) 7843 return; 7844 7845 if (HAS_DDI(dev_priv)) { 7846 if (intel_ddi_crt_present(dev_priv)) 7847 intel_crt_init(display); 7848 7849 intel_bios_for_each_encoder(display, intel_ddi_init); 7850 7851 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 7852 vlv_dsi_init(dev_priv); 7853 } else if (HAS_PCH_SPLIT(dev_priv)) { 7854 int found; 7855 7856 /* 7857 * intel_edp_init_connector() depends on this completing first, 7858 * to prevent the registration of both eDP and LVDS and the 7859 * incorrect sharing of the PPS. 7860 */ 7861 intel_lvds_init(dev_priv); 7862 intel_crt_init(display); 7863 7864 dpd_is_edp = intel_dp_is_port_edp(display, PORT_D); 7865 7866 if (ilk_has_edp_a(dev_priv)) 7867 g4x_dp_init(display, DP_A, PORT_A); 7868 7869 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) { 7870 /* PCH SDVOB multiplex with HDMIB */ 7871 found = intel_sdvo_init(display, PCH_SDVOB, PORT_B); 7872 if (!found) 7873 g4x_hdmi_init(display, PCH_HDMIB, PORT_B); 7874 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED)) 7875 g4x_dp_init(display, PCH_DP_B, PORT_B); 7876 } 7877 7878 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED) 7879 g4x_hdmi_init(display, PCH_HDMIC, PORT_C); 7880 7881 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED) 7882 g4x_hdmi_init(display, PCH_HDMID, PORT_D); 7883 7884 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED) 7885 g4x_dp_init(display, PCH_DP_C, PORT_C); 7886 7887 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED) 7888 g4x_dp_init(display, PCH_DP_D, PORT_D); 7889 } else if (IS_VALLEYVIEW(dev_priv) || 
		   IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
			intel_crt_init(display);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(display, PORT_B);
		has_port = intel_bios_is_port_present(display, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			/* &=: keep eDP claim only if DP init actually succeeded */
			has_edp &= g4x_dp_init(display, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(display, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(display, PORT_C);
		has_port = intel_bios_is_port_present(display, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(display, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(display, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(display, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(display, CHV_DP_D, PORT_D);
			if
			   (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(display, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(display);
	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(display);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(display, GEN3_SDVOB, PORT_B);
			/* No SDVO device: on G4X the same pins may carry HDMI/DP */
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(display, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				g4x_dp_init(display, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(display, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(display, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				g4x_dp_init(display, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			g4x_dp_init(display, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(display);
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(display);
		intel_dvo_init(dev_priv);
	}

	/* Now that all encoders exist, compute their routing/cloning masks */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}

/*
 * Maximum dotclock the platform can drive: the per-pipe max, multiplied
 * by the number of pipes a joiner configuration can gang together
 * (ultrajoiner: 4 pipes, big/uncompressed joiner: 2 pipes).
 */
static int max_dotclock(struct drm_i915_private *i915)
{
	struct intel_display *display = &i915->display;
	int max_dotclock = display->cdclk.max_dotclk_freq;

	if (HAS_ULTRAJOINER(display))
		max_dotclock *= 4;
	else if (HAS_UNCOMPRESSED_JOINER(display) || HAS_BIGJOINER(display))
		max_dotclock *= 2;

	return max_dotclock;
}

/*
 * Global mode validation: reject mode flags the hardware can't do, clearly
 * excessive dotclocks, and timings beyond the transcoder limits of the
 * display generation.
 */
enum drm_mode_status intel_mode_valid(struct drm_device *dev,
				      const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/*
	 * Reject clearly excessive dotclocks early to
	 * avoid having to worry about huge integers later.
	 */
	if (mode->clock > max_dotclock(dev_priv))
		return MODE_CLOCK_HIGH;

	/* Transcoder timing limits */
	if (DISPLAY_VER(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	/* sync positions are bounded by htotal/vtotal limits as well */
	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

/*
 * Extra timing restrictions (minimum active width, blanking and hsync
 * front porch) for modes driven by a CPU transcoder.
 */
enum drm_mode_status intel_cpu_transcoder_mode_valid(struct intel_display *display,
						     const struct drm_display_mode *mode)
{
	/*
	 * Additional transcoder timing limits,
	 * excluding BXT/GLK DSI transcoders.
	 */
	if (DISPLAY_VER(display) >= 5) {
		/* ilk+: at least 64 active pixels and 32 pixels of hblank */
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(display) >= 5 || display->platform.g4x) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

/*
 * Reject modes larger than the biggest plane the platform can scan out,
 * taking joined pipes into account, so userspace can still get a
 * fullscreen plane for every advertised mode.
 */
enum drm_mode_status
intel_mode_valid_max_plane_size(struct intel_display *display,
				const struct drm_display_mode *mode,
				int num_joined_pipes)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (DISPLAY_VER(display) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertize modes that are
	 * too big for that.
	 */
	if (DISPLAY_VER(display) >= 30) {
		plane_width_max = 6144 * num_joined_pipes;
		plane_height_max = 4800;
	} else if (DISPLAY_VER(display) >= 11) {
		plane_width_max = 5120 * num_joined_pipes;
		plane_height_max = 4320;
	} else {
		/* display ver 9-10: joining doesn't extend the plane limits */
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

/* Display ver 9+ (see intel_init_display_hooks()) */
static const struct intel_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
	.fixup_initial_plane_config = skl_fixup_initial_plane_config,
};

/* Pre-ver-9 DDI platforms (HSW/BDW) */
static const struct intel_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/* ILK/SNB/IVB-class PCH-split platforms */
static const struct intel_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/* VLV/CHV */
static const struct intel_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config =
		i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/* Everything older (gen2-4 and other non-DDI, non-PCH-split platforms) */
static const struct intel_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	/* Ordered from newest to oldest platform class */
	if (DISPLAY_VER(dev_priv) >= 9) {
		dev_priv->display.funcs.display = &skl_display_funcs;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.funcs.display = &ddi_display_funcs;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.funcs.display = &pch_split_display_funcs;
	} else if (IS_CHERRYVIEW(dev_priv) ||
		   IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.funcs.display = &vlv_display_funcs;
	} else {
		dev_priv->display.funcs.display = &i9xx_display_funcs;
	}
}

/*
 * Commit an initial atomic state at driver load: pulls in the state of
 * every CRTC (and, where an encoder's initial_fastset_check fails, its
 * connectors) and commits it, retrying on modeset-lock deadlock.
 */
int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			ret =
			      drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Standard DRM deadlock/backoff dance: drop locks and retry */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

/*
 * Force-enable @pipe with a fixed 640x480@60 VGA mode, used by the i830
 * "force pipe" quirk (see the debug message below). Programs the
 * transcoder timings and DPLL directly, bypassing the normal modeset path.
 */
void i830_enable_pipe(struct intel_display *display, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	enum transcoder cpu_transcoder = (enum transcoder)pipe;
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check that the hardcoded dividers give the expected dotclock */
	drm_WARN_ON(display->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(display->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder),
		       HACTIVE(640 - 1) | HTOTAL(800 - 1));
	intel_de_write(display,
		       TRANS_HBLANK(display, cpu_transcoder),
		       HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
	intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder),
		       HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
	intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
		       VACTIVE(480 - 1) | VTOTAL(525 - 1));
	intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
		       VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
	intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder),
		       VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
	intel_de_write(display, PIPESRC(display, pipe),
		       PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));

	/* Same divisors for both FP registers */
	intel_de_write(display, FP0(pipe), fp);
	intel_de_write(display, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(display, DPLL(display, pipe),
		       dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(display, DPLL(display, pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(display, DPLL(display, pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(display, DPLL(display, pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(display, DPLL(display, pipe), dpll);
		intel_de_posting_read(display, DPLL(display, pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(display, TRANSCONF(display, pipe), TRANSCONF_ENABLE);
	intel_de_posting_read(display, TRANSCONF(display, pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}

/*
 * Counterpart of i830_enable_pipe(): disable a pipe that was force-enabled
 * by the quirk, after verifying no plane or cursor is still scanning out
 * from it, then shut down its DPLL.
 */
void i830_disable_pipe(struct intel_display *display, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

	drm_dbg_kms(display->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* All planes and cursors must already be off before killing the pipe */
	drm_WARN_ON(display->drm,
		    intel_de_read(display, DSPCNTR(display, PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, DSPCNTR(display, PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, DSPCNTR(display, PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, CURCNTR(display, PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, CURCNTR(display, PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(display, TRANSCONF(display, pipe), 0);
	intel_de_posting_read(display, TRANSCONF(display, pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(display, DPLL(display, pipe));
}

/*
 * Does scanout need the VT-d workaround? Applies on display versions 6-11
 * when VT-d is active.
 */
bool intel_scanout_needs_vtd_wa(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	return IS_DISPLAY_VER(display, 6, 11) && i915_vtd_active(i915);
}