/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dma-resv.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_cursor_regs.h"
#include "intel_cx0_phy.h"
#include "intel_cursor.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_tdf.h"
#include "intel_tv.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
#include "intel_vga.h"
#include "intel_vrr.h"
#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"
"intel_panel.h" 106 #include "intel_pch_display.h" 107 #include "intel_pch_refclk.h" 108 #include "intel_pcode.h" 109 #include "intel_pipe_crc.h" 110 #include "intel_plane_initial.h" 111 #include "intel_pmdemand.h" 112 #include "intel_pps.h" 113 #include "intel_psr.h" 114 #include "intel_psr_regs.h" 115 #include "intel_sdvo.h" 116 #include "intel_snps_phy.h" 117 #include "intel_tc.h" 118 #include "intel_tdf.h" 119 #include "intel_tv.h" 120 #include "intel_vblank.h" 121 #include "intel_vdsc.h" 122 #include "intel_vdsc_regs.h" 123 #include "intel_vga.h" 124 #include "intel_vrr.h" 125 #include "intel_wm.h" 126 #include "skl_scaler.h" 127 #include "skl_universal_plane.h" 128 #include "skl_universal_plane_regs.h" 129 #include "skl_watermark.h" 130 #include "vlv_dpio_phy_regs.h" 131 #include "vlv_dsi.h" 132 #include "vlv_dsi_pll.h" 133 #include "vlv_dsi_regs.h" 134 #include "vlv_sideband.h" 135 136 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); 137 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); 138 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); 139 static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state); 140 141 /* returns HPLL frequency in kHz */ 142 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) 143 { 144 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 145 146 /* Obtain SKU information */ 147 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 148 CCK_FUSE_HPLL_FREQ_MASK; 149 150 return vco_freq[hpll_freq] * 1000; 151 } 152 153 int vlv_get_cck_clock(struct drm_i915_private *dev_priv, 154 const char *name, u32 reg, int ref_freq) 155 { 156 u32 val; 157 int divider; 158 159 val = vlv_cck_read(dev_priv, reg); 160 divider = val & CCK_FREQUENCY_VALUES; 161 162 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) != 163 (divider << CCK_FREQUENCY_STATUS_SHIFT), 164 "%s change in progress\n", name); 165 166 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); 167 } 168 169 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 170 const char *name, u32 reg) 171 { 172 int hpll; 173 174 vlv_cck_get(dev_priv); 175 176 if (dev_priv->hpll_freq == 0) 177 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv); 178 179 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq); 180 181 vlv_cck_put(dev_priv); 182 183 return hpll; 184 } 185 186 void intel_update_czclk(struct drm_i915_private *dev_priv) 187 { 188 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) 189 return; 190 191 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", 192 CCK_CZ_CLOCK_CONTROL); 193 194 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n", 195 dev_priv->czclk_freq); 196 } 197 198 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state) 199 { 200 return (crtc_state->active_planes & 201 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0; 202 } 203 204 /* WA Display #0827: Gen9:all */ 205 static void 206 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) 207 { 208 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 209 DUPS1_GATING_DIS | DUPS2_GATING_DIS, 210 enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0); 211 } 212 213 /* Wa_2006604312:icl,ehl */ 214 static void 215 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 216 bool enable) 217 { 218 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 219 DPFR_GATING_DIS, 220 enable ? 
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DUPS1_GATING_DIS | DUPS2_GATING_DIS,
		     enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DPFR_GATING_DIS,
		     enable ? DPFR_GATING_DIS : 0);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

static enum pipe joiner_primary_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->joiner_pipes) - 1;
}

/*
 * The following helper functions, despite being named for bigjoiner,
 * are applicable to both bigjoiner and uncompressed joiner configurations.
 */
static bool is_bigjoiner(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->joiner_pipes) >= 2;
}

static u8 bigjoiner_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!is_bigjoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b01010101 << joiner_primary_pipe(crtc_state));
}

static unsigned int bigjoiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!is_bigjoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b10101010 << joiner_primary_pipe(crtc_state));
}
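/*
 * Example of the alternating masks above (illustrative): with
 * joiner_pipes = BIT(PIPE_A) | BIT(PIPE_B) = 0b0011 and pipe A as the
 * primary (shift 0), the primary mask is 0b0011 & 0b01010101 = BIT(PIPE_A)
 * and the secondary mask is 0b0011 & 0b10101010 = BIT(PIPE_B). With a
 * four-pipe mask of 0b1111 the same patterns pick pipes A/C as primaries
 * and B/D as secondaries of the two joined pairs.
 */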
bool intel_crtc_is_bigjoiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return false;

	return BIT(crtc->pipe) & bigjoiner_primary_pipes(crtc_state);
}

bool intel_crtc_is_bigjoiner_secondary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return false;

	return BIT(crtc->pipe) & bigjoiner_secondary_pipes(crtc_state);
}

u8 _intel_modeset_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return BIT(crtc->pipe);

	return bigjoiner_primary_pipes(crtc_state);
}

u8 _intel_modeset_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	return bigjoiner_secondary_pipes(crtc_state);
}

u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->joiner_pipes)
		return crtc_state->joiner_pipes & ~BIT(joiner_primary_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_joiner_secondary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe != joiner_primary_pipe(crtc_state);
}

bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe == joiner_primary_pipe(crtc_state);
}

int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(intel_crtc_joined_pipe_mask(crtc_state));
}

u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return BIT(crtc->pipe) | crtc_state->joiner_pipes;
}

struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (intel_crtc_is_joiner_secondary(crtc_state))
		return intel_crtc_for_pipe(display, joiner_primary_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
					    TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv,
					TRANSCONF(dev_priv, cpu_transcoder));
		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(dev_priv, cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder), str_on_off(state),
			str_on_off(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(i915, cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(dev_priv, 0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(dev_priv, 0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait(dev_priv, dpll_reg, port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
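/*
 * Note on the PORT_C case above (as the mask definitions suggest): the
 * PORT_C ready bits sit in the same DPLL register as the PORT_B ones,
 * just four bit positions higher, so the caller's expected_mask is
 * shifted instead of switching registers (e.g. 0x1 becomes 0x10).
 * PORT_D instead reports readiness through DPIO_PHY_STATUS.
 */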
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(dev_priv, pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	if (DISPLAY_VER(dev_priv) >= 14) {
		u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
		u32 set = 0;

		if (DISPLAY_VER(dev_priv) == 14)
			set |= DP_FEC_BS_JITTER_WA;

		intel_de_rmw(dev_priv,
			     hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
			     clear, set);
	}

	val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(dev_priv) >= 13 &&
	    new_crtc_state->dsc.compression_enable) {
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
		val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
				      TRANSCONF_PIXEL_COUNT_SCALING_X4);
	}

	intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
		       val | TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~TRANSCONF_ENABLE;

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(dev_priv) >= 13 &&
	    old_crtc_state->dsc.compression_enable)
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;

	intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int plane_size;

		if (rem_info->plane[i].linear)
			plane_size = rem_info->plane[i].size;
		else
			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;

		if (plane_size == 0)
			continue;

		if (rem_info->plane_alignment)
			size = ALIGN(size, rem_info->plane_alignment);

		size += plane_size;
	}

	return size;
}
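/*
 * Worked example for the remapped size (illustrative numbers; the units
 * here follow whatever granularity the view uses, e.g. tiles or pages,
 * not bytes): plane 0 with dst_stride = 8 and height = 32 contributes
 * 8 * 32 = 256 units, which is already aligned for plane_alignment = 256;
 * plane 1 with dst_stride = 4 and height = 16 then adds 4 * 16 = 64 units
 * starting at the aligned offset 256, giving a total of 320 units.
 */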
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc && !plane_state->no_fbc_reason &&
		 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotation isn't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;

	return y * pitch + x * cpp;
}
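/*
 * Worked example (illustrative values): a 32bpp plane (cpp = 4) with a
 * mapping stride of 16384 bytes maps (x, y) = (100, 50) to
 * 50 * 16384 + 100 * 4 = 819600 bytes from the start of the mapping.
 */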
/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * if pipe A happens to be disabled, use the
	 * first pipe from pipe_mask instead.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}

unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2(dev_priv))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
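/*
 * Summary of the underrun recovery programming above: on ADL-P and other
 * display 13+ platforms the chicken bit is a "disable" bit and is set,
 * while on DG2 the same workaround is expressed by clearing an "enable"
 * bit; either way the end result is underrun recovery turned off.
 */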
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *primary_crtc;
	int num_encoders = 0;
	int i;

	primary_crtc = intel_primary_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &primary_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(state->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(primary_crtc->pipe));

	return encoder;
}

static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/*
	 * Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
			  PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
			  PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/*
	 * Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(dev_priv) == 9)
		return true;

	return false;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N strech max must be programmed to 11b (x1)
		 *  when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}

static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}

static void intel_encoders_audio_enable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_enable)
			encoder->audio_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_audio_disable(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_disable)
			encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
	}
}

#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))
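/*
 * Example of the is_enabling()/is_disabling() semantics (reading of the
 * macros above): a feature going 0 -> 1 is "enabling" and 1 -> 0 is
 * "disabling", but a full modeset counts as both even for a 1 -> 1
 * transition, since the feature is torn down and brought back up along
 * with the pipe.
 */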
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline ||
		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
}

static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
				const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->cmrr.cmrr_m != new_crtc_state->cmrr.cmrr_m ||
		old_crtc_state->cmrr.cmrr_n != new_crtc_state->cmrr.cmrr_n;
}

static bool intel_crtc_vrr_enabling(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
			   const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}
#undef is_disabling
#undef is_enabling

static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_post_plane_update(state, crtc);

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);

	if (audio_enabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_enable(state, crtc);
}
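/*
 * Note that the workaround teardown above mirrors the setup in
 * intel_pre_plane_update(): each needs_*_wa() pair arms the workaround
 * before the plane update when the new state needs it and disarms it
 * afterwards when it no longer does, so the workarounds are only active
 * across commits that actually require them.
 */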
static void intel_post_plane_update_after_readout(struct intel_atomic_state *state,
						  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
	hsw_ips_post_update(state, crtc);

	/*
	 * Activate DRRS after state readout to avoid
	 * dp_m_n vs. dp_m2_n2 confusion on BDW+.
	 */
	intel_drrs_activate(new_crtc_state);
}

static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}

static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
		~new_crtc_state->async_flip_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_toggle_wa &&
		    plane->pipe == crtc->pipe &&
		    disable_async_flip_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			intel_plane_async_flip(plane, old_crtc_state,
					       old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}
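/*
 * Example of the mask arithmetic feeding the workaround above: with
 * old->async_flip_planes = BIT(PLANE_PRIMARY) | BIT(PLANE_SPRITE0) and
 * new->async_flip_planes = BIT(PLANE_PRIMARY), only PLANE_SPRITE0 lands
 * in disable_async_flip_planes and has its async flip bit cleared (plus
 * the vblank wait) here.
 */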
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (intel_crtc_vrr_disabling(state, crtc)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	if (audio_disabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_disable(state, crtc);

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);
	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}

static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports
	 * after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (i915->display.dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}
}

static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
	}
}
static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
	}
}
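/*
 * For reference, the encoder hooks iterated above are invoked in a fixed
 * order by the CRTC enable/disable paths below: pre_pll_enable ->
 * pre_enable -> enable on the way up, and disable -> post_disable ->
 * post_pll_disable on the way down, with update_pipe used instead for
 * fastsets that keep the pipe running.
 */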
static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}

static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	ilk_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		ilk_pch_pre_enable(state, crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_modeset(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		intel_wait_for_pipe_scanline_moving(crtc);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/* Display WA #1180: WaDisableScalarClockGating: glk */
static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled;
}

static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;

	intel_de_rmw(i915, CLKGATE_DIS_PSL(crtc->pipe),
		     mask, enable ? mask : 0);
}
static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
		       HSW_LINETIME(crtc_state->linetime) |
		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
}

static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder),
		     HSW_FRAME_START_DELAY_MASK,
		     HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
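/*
 * Note the off-by-one above: crtc_state->framestart_delay appears to be
 * stored 1-based while the HSW_FRAME_START_DELAY() hardware field is
 * programmed 0-based, hence the "- 1".
 */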
1788 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { 1789 const struct intel_crtc_state *pipe_crtc_state = 1790 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1791 1792 pipe_crtc->active = true; 1793 1794 if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) 1795 glk_pipe_scaler_clock_gating_wa(pipe_crtc, true); 1796 1797 if (DISPLAY_VER(dev_priv) >= 9) 1798 skl_pfit_enable(pipe_crtc_state); 1799 else 1800 ilk_pfit_enable(pipe_crtc_state); 1801 1802 /* 1803 * On ILK+ LUT must be loaded before the pipe is running but with 1804 * clocks enabled 1805 */ 1806 intel_color_modeset(pipe_crtc_state); 1807 1808 hsw_set_linetime_wm(pipe_crtc_state); 1809 1810 if (DISPLAY_VER(dev_priv) >= 11) 1811 icl_set_pipe_chicken(pipe_crtc_state); 1812 1813 intel_initial_watermarks(state, pipe_crtc); 1814 } 1815 1816 intel_encoders_enable(state, crtc); 1817 1818 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { 1819 const struct intel_crtc_state *pipe_crtc_state = 1820 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1821 enum pipe hsw_workaround_pipe; 1822 1823 if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) { 1824 intel_crtc_wait_for_next_vblank(pipe_crtc); 1825 glk_pipe_scaler_clock_gating_wa(pipe_crtc, false); 1826 } 1827 1828 /* 1829 * If we change the relative order between pipe/planes 1830 * enabling, we need to change the workaround. 1831 */ 1832 hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe; 1833 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 1834 struct intel_crtc *wa_crtc = 1835 intel_crtc_for_pipe(display, hsw_workaround_pipe); 1836 1837 intel_crtc_wait_for_next_vblank(wa_crtc); 1838 intel_crtc_wait_for_next_vblank(wa_crtc); 1839 } 1840 } 1841 } 1842 1843 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) 1844 { 1845 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 1846 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1847 enum pipe pipe = crtc->pipe; 1848 1849 /* To avoid upsetting the power well on haswell only disable the pfit if 1850 * it's in use. The hw state code will make sure we get this right. */ 1851 if (!old_crtc_state->pch_pfit.enabled) 1852 return; 1853 1854 intel_de_write_fw(dev_priv, PF_CTL(pipe), 0); 1855 intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0); 1856 intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0); 1857 } 1858 1859 static void ilk_crtc_disable(struct intel_atomic_state *state, 1860 struct intel_crtc *crtc) 1861 { 1862 const struct intel_crtc_state *old_crtc_state = 1863 intel_atomic_get_old_crtc_state(state, crtc); 1864 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1865 enum pipe pipe = crtc->pipe; 1866 1867 /* 1868 * Sometimes spurious CPU pipe underruns happen when the 1869 * pipe is already disabled, but FDI RX/TX is still enabled. 1870 * Happens at least with VGA+HDMI cloning. Suppress them. 
1871 */ 1872 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 1873 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 1874 1875 intel_encoders_disable(state, crtc); 1876 1877 intel_crtc_vblank_off(old_crtc_state); 1878 1879 intel_disable_transcoder(old_crtc_state); 1880 1881 ilk_pfit_disable(old_crtc_state); 1882 1883 if (old_crtc_state->has_pch_encoder) 1884 ilk_pch_disable(state, crtc); 1885 1886 intel_encoders_post_disable(state, crtc); 1887 1888 if (old_crtc_state->has_pch_encoder) 1889 ilk_pch_post_disable(state, crtc); 1890 1891 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 1892 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 1893 1894 intel_disable_shared_dpll(old_crtc_state); 1895 } 1896 1897 static void hsw_crtc_disable(struct intel_atomic_state *state, 1898 struct intel_crtc *crtc) 1899 { 1900 struct intel_display *display = to_intel_display(state); 1901 const struct intel_crtc_state *old_crtc_state = 1902 intel_atomic_get_old_crtc_state(state, crtc); 1903 struct intel_crtc *pipe_crtc; 1904 int i; 1905 1906 /* 1907 * FIXME collapse everything to one hook. 1908 * Need care with mst->ddi interactions. 1909 */ 1910 intel_encoders_disable(state, crtc); 1911 intel_encoders_post_disable(state, crtc); 1912 1913 for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { 1914 const struct intel_crtc_state *old_pipe_crtc_state = 1915 intel_atomic_get_old_crtc_state(state, pipe_crtc); 1916 1917 intel_disable_shared_dpll(old_pipe_crtc_state); 1918 } 1919 1920 intel_encoders_post_pll_disable(state, crtc); 1921 1922 for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) 1923 intel_dmc_disable_pipe(display, pipe_crtc->pipe); 1924 } 1925 1926 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) 1927 { 1928 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1929 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1930 1931 if (!crtc_state->gmch_pfit.control) 1932 return; 1933 1934 /* 1935 * The panel fitter should only be adjusted whilst the pipe is disabled, 1936 * according to register description and PRM. 1937 */ 1938 drm_WARN_ON(&dev_priv->drm, 1939 intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)) & PFIT_ENABLE); 1940 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); 1941 1942 intel_de_write(dev_priv, PFIT_PGM_RATIOS(dev_priv), 1943 crtc_state->gmch_pfit.pgm_ratios); 1944 intel_de_write(dev_priv, PFIT_CONTROL(dev_priv), 1945 crtc_state->gmch_pfit.control); 1946 1947 /* Border color in case we don't scale up to the full screen. Black by 1948 * default, change to something else for debugging. */ 1949 intel_de_write(dev_priv, BCLRPAT(dev_priv, crtc->pipe), 0); 1950 } 1951 1952 /* Prefer intel_encoder_is_combo() */ 1953 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) 1954 { 1955 if (phy == PHY_NONE) 1956 return false; 1957 else if (IS_ALDERLAKE_S(dev_priv)) 1958 return phy <= PHY_E; 1959 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) 1960 return phy <= PHY_D; 1961 else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) 1962 return phy <= PHY_C; 1963 else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12)) 1964 return phy <= PHY_B; 1965 else 1966 /* 1967 * DG2 outputs labelled as "combo PHY" in the bspec use 1968 * SNPS PHYs with completely different programming, 1969 * hence we always return false here. 
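* (those DG2 ports are reported by intel_phy_is_snps() instead)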
1970 */ 1971 return false; 1972 } 1973 1974 /* Prefer intel_encoder_is_tc() */ 1975 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) 1976 { 1977 /* 1978 * Discrete GPU PHYs are not attached to FIAs to support the TC 1979 * subsystem, legacy or non-legacy, and only support native DP/HDMI 1980 */ 1981 if (IS_DGFX(dev_priv)) 1982 return false; 1983 1984 if (DISPLAY_VER(dev_priv) >= 13) 1985 return phy >= PHY_F && phy <= PHY_I; 1986 else if (IS_TIGERLAKE(dev_priv)) 1987 return phy >= PHY_D && phy <= PHY_I; 1988 else if (IS_ICELAKE(dev_priv)) 1989 return phy >= PHY_C && phy <= PHY_F; 1990 1991 return false; 1992 } 1993 1994 /* Prefer intel_encoder_is_snps() */ 1995 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy) 1996 { 1997 /* 1998 * For DG2, and for DG2 only, all four "combo" ports and the TC1 port 1999 * (PHY E) use Synopsys PHYs. See intel_phy_is_tc(). 2000 */ 2001 return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E; 2002 } 2003 2004 /* Prefer intel_encoder_to_phy() */ 2005 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) 2006 { 2007 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD) 2008 return PHY_D + port - PORT_D_XELPD; 2009 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1) 2010 return PHY_F + port - PORT_TC1; 2011 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1) 2012 return PHY_B + port - PORT_TC1; 2013 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1) 2014 return PHY_C + port - PORT_TC1; 2015 else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && 2016 port == PORT_D) 2017 return PHY_A; 2018 2019 return PHY_A + port - PORT_A; 2020 } 2021 2022 /* Prefer intel_encoder_to_tc() */ 2023 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) 2024 { 2025 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) 2026 return TC_PORT_NONE; 2027 2028 if (DISPLAY_VER(dev_priv) >= 12) 2029 return TC_PORT_1 + port - PORT_TC1; 2030 else 2031 return TC_PORT_1 + port - PORT_C; 2032 } 2033 2034 enum phy intel_encoder_to_phy(struct intel_encoder *encoder) 2035 { 2036 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2037 2038 return intel_port_to_phy(i915, encoder->port); 2039 } 2040 2041 bool intel_encoder_is_combo(struct intel_encoder *encoder) 2042 { 2043 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2044 2045 return intel_phy_is_combo(i915, intel_encoder_to_phy(encoder)); 2046 } 2047 2048 bool intel_encoder_is_snps(struct intel_encoder *encoder) 2049 { 2050 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2051 2052 return intel_phy_is_snps(i915, intel_encoder_to_phy(encoder)); 2053 } 2054 2055 bool intel_encoder_is_tc(struct intel_encoder *encoder) 2056 { 2057 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2058 2059 return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder)); 2060 } 2061 2062 enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder) 2063 { 2064 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2065 2066 return intel_port_to_tc(i915, encoder->port); 2067 } 2068 2069 enum intel_display_power_domain 2070 intel_aux_power_domain(struct intel_digital_port *dig_port) 2071 { 2072 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 2073 2074 if (intel_tc_port_in_tbt_alt_mode(dig_port)) 2075 return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch); 2076 2077 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); 2078 } 2079 2080 static void
get_crtc_power_domains(struct intel_crtc_state *crtc_state, 2081 struct intel_power_domain_mask *mask) 2082 { 2083 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2084 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2085 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2086 struct drm_encoder *encoder; 2087 enum pipe pipe = crtc->pipe; 2088 2089 bitmap_zero(mask->bits, POWER_DOMAIN_NUM); 2090 2091 if (!crtc_state->hw.active) 2092 return; 2093 2094 set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits); 2095 set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits); 2096 if (crtc_state->pch_pfit.enabled || 2097 crtc_state->pch_pfit.force_thru) 2098 set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits); 2099 2100 drm_for_each_encoder_mask(encoder, &dev_priv->drm, 2101 crtc_state->uapi.encoder_mask) { 2102 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 2103 2104 set_bit(intel_encoder->power_domain, mask->bits); 2105 } 2106 2107 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 2108 set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits); 2109 2110 if (crtc_state->shared_dpll) 2111 set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits); 2112 2113 if (crtc_state->dsc.compression_enable) 2114 set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits); 2115 } 2116 2117 void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state, 2118 struct intel_power_domain_mask *old_domains) 2119 { 2120 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2121 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2122 enum intel_display_power_domain domain; 2123 struct intel_power_domain_mask domains, new_domains; 2124 2125 get_crtc_power_domains(crtc_state, &domains); 2126 2127 bitmap_andnot(new_domains.bits, 2128 domains.bits, 2129 crtc->enabled_power_domains.mask.bits, 2130 POWER_DOMAIN_NUM); 2131 bitmap_andnot(old_domains->bits, 2132 crtc->enabled_power_domains.mask.bits, 2133 domains.bits, 2134 POWER_DOMAIN_NUM); 2135 2136 for_each_power_domain(domain, &new_domains) 2137 intel_display_power_get_in_set(dev_priv, 2138 &crtc->enabled_power_domains, 2139 domain); 2140 } 2141 2142 void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc, 2143 struct intel_power_domain_mask *domains) 2144 { 2145 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev), 2146 &crtc->enabled_power_domains, 2147 domains); 2148 } 2149 2150 static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 2151 { 2152 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2153 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2154 2155 if (intel_crtc_has_dp_encoder(crtc_state)) { 2156 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 2157 &crtc_state->dp_m_n); 2158 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 2159 &crtc_state->dp_m2_n2); 2160 } 2161 2162 intel_set_transcoder_timings(crtc_state); 2163 2164 i9xx_set_pipeconf(crtc_state); 2165 } 2166 2167 static void valleyview_crtc_enable(struct intel_atomic_state *state, 2168 struct intel_crtc *crtc) 2169 { 2170 const struct intel_crtc_state *new_crtc_state = 2171 intel_atomic_get_new_crtc_state(state, crtc); 2172 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2173 enum pipe pipe = crtc->pipe; 2174 2175 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 2176 return; 2177 2178 i9xx_configure_cpu_transcoder(new_crtc_state); 2179 2180 intel_set_pipe_src_size(new_crtc_state); 2181 2182 intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0); 
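/* Pipe B on CHV has an extra blender stage; presumably selecting legacy blending with a black canvas keeps its output identical to the other pipes. */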
2183 2184 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 2185 intel_de_write(dev_priv, CHV_BLEND(dev_priv, pipe), 2186 CHV_BLEND_LEGACY); 2187 intel_de_write(dev_priv, CHV_CANVAS(dev_priv, pipe), 0); 2188 } 2189 2190 crtc->active = true; 2191 2192 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 2193 2194 intel_encoders_pre_pll_enable(state, crtc); 2195 2196 if (IS_CHERRYVIEW(dev_priv)) 2197 chv_enable_pll(new_crtc_state); 2198 else 2199 vlv_enable_pll(new_crtc_state); 2200 2201 intel_encoders_pre_enable(state, crtc); 2202 2203 i9xx_pfit_enable(new_crtc_state); 2204 2205 intel_color_modeset(new_crtc_state); 2206 2207 intel_initial_watermarks(state, crtc); 2208 intel_enable_transcoder(new_crtc_state); 2209 2210 intel_crtc_vblank_on(new_crtc_state); 2211 2212 intel_encoders_enable(state, crtc); 2213 } 2214 2215 static void i9xx_crtc_enable(struct intel_atomic_state *state, 2216 struct intel_crtc *crtc) 2217 { 2218 const struct intel_crtc_state *new_crtc_state = 2219 intel_atomic_get_new_crtc_state(state, crtc); 2220 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2221 enum pipe pipe = crtc->pipe; 2222 2223 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 2224 return; 2225 2226 i9xx_configure_cpu_transcoder(new_crtc_state); 2227 2228 intel_set_pipe_src_size(new_crtc_state); 2229 2230 crtc->active = true; 2231 2232 if (DISPLAY_VER(dev_priv) != 2) 2233 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 2234 2235 intel_encoders_pre_enable(state, crtc); 2236 2237 i9xx_enable_pll(new_crtc_state); 2238 2239 i9xx_pfit_enable(new_crtc_state); 2240 2241 intel_color_modeset(new_crtc_state); 2242 2243 if (!intel_initial_watermarks(state, crtc)) 2244 intel_update_watermarks(dev_priv); 2245 intel_enable_transcoder(new_crtc_state); 2246 2247 intel_crtc_vblank_on(new_crtc_state); 2248 2249 intel_encoders_enable(state, crtc); 2250 2251 /* prevents spurious underruns */ 2252 if (DISPLAY_VER(dev_priv) == 2) 2253 intel_crtc_wait_for_next_vblank(crtc); 2254 } 2255 2256 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) 2257 { 2258 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 2259 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2260 2261 if (!old_crtc_state->gmch_pfit.control) 2262 return; 2263 2264 assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder); 2265 2266 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n", 2267 intel_de_read(dev_priv, PFIT_CONTROL(dev_priv))); 2268 intel_de_write(dev_priv, PFIT_CONTROL(dev_priv), 0); 2269 } 2270 2271 static void i9xx_crtc_disable(struct intel_atomic_state *state, 2272 struct intel_crtc *crtc) 2273 { 2274 struct intel_display *display = to_intel_display(state); 2275 struct drm_i915_private *dev_priv = to_i915(display->drm); 2276 struct intel_crtc_state *old_crtc_state = 2277 intel_atomic_get_old_crtc_state(state, crtc); 2278 enum pipe pipe = crtc->pipe; 2279 2280 /* 2281 * On gen2 planes are double buffered but the pipe isn't, so we must 2282 * wait for planes to fully turn off before disabling the pipe. 
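* (the double buffered plane disables only latch at the next vblank, hence the wait below)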
2283 */ 2284 if (DISPLAY_VER(dev_priv) == 2) 2285 intel_crtc_wait_for_next_vblank(crtc); 2286 2287 intel_encoders_disable(state, crtc); 2288 2289 intel_crtc_vblank_off(old_crtc_state); 2290 2291 intel_disable_transcoder(old_crtc_state); 2292 2293 i9xx_pfit_disable(old_crtc_state); 2294 2295 intel_encoders_post_disable(state, crtc); 2296 2297 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { 2298 if (IS_CHERRYVIEW(dev_priv)) 2299 chv_disable_pll(dev_priv, pipe); 2300 else if (IS_VALLEYVIEW(dev_priv)) 2301 vlv_disable_pll(dev_priv, pipe); 2302 else 2303 i9xx_disable_pll(old_crtc_state); 2304 } 2305 2306 intel_encoders_post_pll_disable(state, crtc); 2307 2308 if (DISPLAY_VER(dev_priv) != 2) 2309 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 2310 2311 if (!dev_priv->display.funcs.wm->initial_watermarks) 2312 intel_update_watermarks(dev_priv); 2313 2314 /* clock the pipe down to 640x480@60 to potentially save power */ 2315 if (IS_I830(dev_priv)) 2316 i830_enable_pipe(display, pipe); 2317 } 2318 2319 void intel_encoder_destroy(struct drm_encoder *encoder) 2320 { 2321 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 2322 2323 drm_encoder_cleanup(encoder); 2324 kfree(intel_encoder); 2325 } 2326 2327 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 2328 { 2329 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2330 2331 /* GDG double wide on either pipe, otherwise pipe A only */ 2332 return DISPLAY_VER(dev_priv) < 4 && 2333 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 2334 } 2335 2336 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state) 2337 { 2338 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock; 2339 struct drm_rect src; 2340 2341 /* 2342 * We only use IF-ID interlacing. If we ever use 2343 * PF-ID we'll need to adjust the pixel_rate here. 
2344 */ 2345 2346 if (!crtc_state->pch_pfit.enabled) 2347 return pixel_rate; 2348 2349 drm_rect_init(&src, 0, 0, 2350 drm_rect_width(&crtc_state->pipe_src) << 16, 2351 drm_rect_height(&crtc_state->pipe_src) << 16); 2352 2353 return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst, 2354 pixel_rate); 2355 } 2356 2357 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode, 2358 const struct drm_display_mode *timings) 2359 { 2360 mode->hdisplay = timings->crtc_hdisplay; 2361 mode->htotal = timings->crtc_htotal; 2362 mode->hsync_start = timings->crtc_hsync_start; 2363 mode->hsync_end = timings->crtc_hsync_end; 2364 2365 mode->vdisplay = timings->crtc_vdisplay; 2366 mode->vtotal = timings->crtc_vtotal; 2367 mode->vsync_start = timings->crtc_vsync_start; 2368 mode->vsync_end = timings->crtc_vsync_end; 2369 2370 mode->flags = timings->flags; 2371 mode->type = DRM_MODE_TYPE_DRIVER; 2372 2373 mode->clock = timings->crtc_clock; 2374 2375 drm_mode_set_name(mode); 2376 } 2377 2378 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 2379 { 2380 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 2381 2382 if (HAS_GMCH(dev_priv)) 2383 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 2384 crtc_state->pixel_rate = 2385 crtc_state->hw.pipe_mode.crtc_clock; 2386 else 2387 crtc_state->pixel_rate = 2388 ilk_pipe_pixel_rate(crtc_state); 2389 } 2390 2391 static void intel_joiner_adjust_timings(const struct intel_crtc_state *crtc_state, 2392 struct drm_display_mode *mode) 2393 { 2394 int num_pipes = intel_crtc_num_joined_pipes(crtc_state); 2395 2396 if (num_pipes == 1) 2397 return; 2398 2399 mode->crtc_clock /= num_pipes; 2400 mode->crtc_hdisplay /= num_pipes; 2401 mode->crtc_hblank_start /= num_pipes; 2402 mode->crtc_hblank_end /= num_pipes; 2403 mode->crtc_hsync_start /= num_pipes; 2404 mode->crtc_hsync_end /= num_pipes; 2405 mode->crtc_htotal /= num_pipes; 2406 } 2407 2408 static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state, 2409 struct drm_display_mode *mode) 2410 { 2411 int overlap = crtc_state->splitter.pixel_overlap; 2412 int n = crtc_state->splitter.link_count; 2413 2414 if (!crtc_state->splitter.enable) 2415 return; 2416 2417 /* 2418 * eDP MSO uses segment timings from EDID for transcoder 2419 * timings, but full mode for everything else. 2420 * 2421 * h_full = (h_segment - pixel_overlap) * link_count 2422 */ 2423 mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n; 2424 mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n; 2425 mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n; 2426 mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n; 2427 mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n; 2428 mode->crtc_htotal = (mode->crtc_htotal - overlap) * n; 2429 mode->crtc_clock *= n; 2430 } 2431 2432 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state) 2433 { 2434 struct drm_display_mode *mode = &crtc_state->hw.mode; 2435 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; 2436 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2437 2438 /* 2439 * Start with the adjusted_mode crtc timings, which 2440 * have been filled with the transcoder timings. 
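* In short, the code below leaves the raw (per MSO segment) transcoder timings in the adjusted_mode crtc timings, derives in pipe_mode the timings a single pipe actually runs, and builds in mode the full "user" mode spanning all joined pipes.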
2441 */ 2442 drm_mode_copy(pipe_mode, adjusted_mode); 2443 2444 /* Expand MSO per-segment transcoder timings to full */ 2445 intel_splitter_adjust_timings(crtc_state, pipe_mode); 2446 2447 /* 2448 * We want the full numbers in adjusted_mode normal timings, 2449 * adjusted_mode crtc timings are left with the raw transcoder 2450 * timings. 2451 */ 2452 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode); 2453 2454 /* Populate the "user" mode with full numbers */ 2455 drm_mode_copy(mode, pipe_mode); 2456 intel_mode_from_crtc_timings(mode, mode); 2457 mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) * 2458 intel_crtc_num_joined_pipes(crtc_state); 2459 mode->vdisplay = drm_rect_height(&crtc_state->pipe_src); 2460 2461 /* Derive per-pipe timings in case joiner is used */ 2462 intel_joiner_adjust_timings(crtc_state, pipe_mode); 2463 intel_mode_from_crtc_timings(pipe_mode, pipe_mode); 2464 2465 intel_crtc_compute_pixel_rate(crtc_state); 2466 } 2467 2468 void intel_encoder_get_config(struct intel_encoder *encoder, 2469 struct intel_crtc_state *crtc_state) 2470 { 2471 encoder->get_config(encoder, crtc_state); 2472 2473 intel_crtc_readout_derived_state(crtc_state); 2474 } 2475 2476 static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state) 2477 { 2478 int num_pipes = intel_crtc_num_joined_pipes(crtc_state); 2479 int width, height; 2480 2481 if (num_pipes == 1) 2482 return; 2483 2484 width = drm_rect_width(&crtc_state->pipe_src); 2485 height = drm_rect_height(&crtc_state->pipe_src); 2486 2487 drm_rect_init(&crtc_state->pipe_src, 0, 0, 2488 width / num_pipes, height); 2489 } 2490 2491 static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state) 2492 { 2493 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2494 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2495 2496 intel_joiner_compute_pipe_src(crtc_state); 2497 2498 /* 2499 * Pipe horizontal size must be even in: 2500 * - DVO ganged mode 2501 * - LVDS dual channel mode 2502 * - Double wide pipe 2503 */ 2504 if (drm_rect_width(&crtc_state->pipe_src) & 1) { 2505 if (crtc_state->double_wide) { 2506 drm_dbg_kms(&i915->drm, 2507 "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n", 2508 crtc->base.base.id, crtc->base.name); 2509 return -EINVAL; 2510 } 2511 2512 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 2513 intel_is_dual_link_lvds(i915)) { 2514 drm_dbg_kms(&i915->drm, 2515 "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n", 2516 crtc->base.base.id, crtc->base.name); 2517 return -EINVAL; 2518 } 2519 } 2520 2521 return 0; 2522 } 2523 2524 static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) 2525 { 2526 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2527 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2528 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2529 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; 2530 int clock_limit = i915->display.cdclk.max_dotclk_freq; 2531 2532 /* 2533 * Start with the adjusted_mode crtc timings, which 2534 * have been filled with the transcoder timings. 
2535 */ 2536 drm_mode_copy(pipe_mode, adjusted_mode); 2537 2538 /* Expand MSO per-segment transcoder timings to full */ 2539 intel_splitter_adjust_timings(crtc_state, pipe_mode); 2540 2541 /* Derive per-pipe timings in case joiner is used */ 2542 intel_joiner_adjust_timings(crtc_state, pipe_mode); 2543 intel_mode_from_crtc_timings(pipe_mode, pipe_mode); 2544 2545 if (DISPLAY_VER(i915) < 4) { 2546 clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10; 2547 2548 /* 2549 * Enable double wide mode when the dot clock 2550 * is > 90% of the (display) core speed. 2551 */ 2552 if (intel_crtc_supports_double_wide(crtc) && 2553 pipe_mode->crtc_clock > clock_limit) { 2554 clock_limit = i915->display.cdclk.max_dotclk_freq; 2555 crtc_state->double_wide = true; 2556 } 2557 } 2558 2559 if (pipe_mode->crtc_clock > clock_limit) { 2560 drm_dbg_kms(&i915->drm, 2561 "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 2562 crtc->base.base.id, crtc->base.name, 2563 pipe_mode->crtc_clock, clock_limit, 2564 str_yes_no(crtc_state->double_wide)); 2565 return -EINVAL; 2566 } 2567 2568 return 0; 2569 } 2570 2571 static int intel_crtc_compute_config(struct intel_atomic_state *state, 2572 struct intel_crtc *crtc) 2573 { 2574 struct intel_crtc_state *crtc_state = 2575 intel_atomic_get_new_crtc_state(state, crtc); 2576 int ret; 2577 2578 ret = intel_dpll_crtc_compute_clock(state, crtc); 2579 if (ret) 2580 return ret; 2581 2582 ret = intel_crtc_compute_pipe_src(crtc_state); 2583 if (ret) 2584 return ret; 2585 2586 ret = intel_crtc_compute_pipe_mode(crtc_state); 2587 if (ret) 2588 return ret; 2589 2590 intel_crtc_compute_pixel_rate(crtc_state); 2591 2592 if (crtc_state->has_pch_encoder) 2593 return ilk_fdi_compute_config(crtc, crtc_state); 2594 2595 return 0; 2596 } 2597 2598 static void 2599 intel_reduce_m_n_ratio(u32 *num, u32 *den) 2600 { 2601 while (*num > DATA_LINK_M_N_MASK || 2602 *den > DATA_LINK_M_N_MASK) { 2603 *num >>= 1; 2604 *den >>= 1; 2605 } 2606 } 2607 2608 static void compute_m_n(u32 *ret_m, u32 *ret_n, 2609 u32 m, u32 n, u32 constant_n) 2610 { 2611 if (constant_n) 2612 *ret_n = constant_n; 2613 else 2614 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 2615 2616 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); 2617 intel_reduce_m_n_ratio(ret_m, ret_n); 2618 } 2619 2620 void 2621 intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes, 2622 int pixel_clock, int link_clock, 2623 int bw_overhead, 2624 struct intel_link_m_n *m_n) 2625 { 2626 u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock); 2627 u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16, 2628 bw_overhead); 2629 u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes); 2630 2631 /* 2632 * Windows/BIOS uses fixed M/N values always. Follow suit. 2633 * 2634 * Also several DP dongles in particular seem to be fussy 2635 * about too large link M/N values. Presumably the 20bit 2636 * value used by Windows/BIOS is acceptable to everyone. 2637 */ 2638 m_n->tu = 64; 2639 compute_m_n(&m_n->data_m, &m_n->data_n, 2640 data_m, data_n, 2641 0x8000000); 2642 2643 compute_m_n(&m_n->link_m, &m_n->link_n, 2644 pixel_clock, link_symbol_clock, 2645 0x80000); 2646 } 2647 2648 void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) 2649 { 2650 /* 2651 * There may be no VBT; and if the BIOS enabled SSC we can 2652 * just keep using it to avoid unnecessary flicker. 
Whereas if the 2653 * BIOS isn't using it, don't assume it will work even if the VBT 2654 * indicates as much. 2655 */ 2656 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 2657 bool bios_lvds_use_ssc = intel_de_read(dev_priv, 2658 PCH_DREF_CONTROL) & 2659 DREF_SSC1_ENABLE; 2660 2661 if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) { 2662 drm_dbg_kms(&dev_priv->drm, 2663 "SSC %s by BIOS, overriding VBT which says %s\n", 2664 str_enabled_disabled(bios_lvds_use_ssc), 2665 str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc)); 2666 dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc; 2667 } 2668 } 2669 } 2670 2671 void intel_zero_m_n(struct intel_link_m_n *m_n) 2672 { 2673 /* corresponds to 0 register value */ 2674 memset(m_n, 0, sizeof(*m_n)); 2675 m_n->tu = 1; 2676 } 2677 2678 void intel_set_m_n(struct drm_i915_private *i915, 2679 const struct intel_link_m_n *m_n, 2680 i915_reg_t data_m_reg, i915_reg_t data_n_reg, 2681 i915_reg_t link_m_reg, i915_reg_t link_n_reg) 2682 { 2683 intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m); 2684 intel_de_write(i915, data_n_reg, m_n->data_n); 2685 intel_de_write(i915, link_m_reg, m_n->link_m); 2686 /* 2687 * On BDW+ writing LINK_N arms the double buffered update 2688 * of all the M/N registers, so it must be written last. 2689 */ 2690 intel_de_write(i915, link_n_reg, m_n->link_n); 2691 } 2692 2693 bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv, 2694 enum transcoder transcoder) 2695 { 2696 if (IS_HASWELL(dev_priv)) 2697 return transcoder == TRANSCODER_EDP; 2698 2699 return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv); 2700 } 2701 2702 void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc, 2703 enum transcoder transcoder, 2704 const struct intel_link_m_n *m_n) 2705 { 2706 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2707 enum pipe pipe = crtc->pipe; 2708 2709 if (DISPLAY_VER(dev_priv) >= 5) 2710 intel_set_m_n(dev_priv, m_n, 2711 PIPE_DATA_M1(dev_priv, transcoder), 2712 PIPE_DATA_N1(dev_priv, transcoder), 2713 PIPE_LINK_M1(dev_priv, transcoder), 2714 PIPE_LINK_N1(dev_priv, transcoder)); 2715 else 2716 intel_set_m_n(dev_priv, m_n, 2717 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), 2718 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); 2719 } 2720 2721 void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc, 2722 enum transcoder transcoder, 2723 const struct intel_link_m_n *m_n) 2724 { 2725 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2726 2727 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) 2728 return; 2729 2730 intel_set_m_n(dev_priv, m_n, 2731 PIPE_DATA_M2(dev_priv, transcoder), 2732 PIPE_DATA_N2(dev_priv, transcoder), 2733 PIPE_LINK_M2(dev_priv, transcoder), 2734 PIPE_LINK_N2(dev_priv, transcoder)); 2735 } 2736 2737 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state) 2738 { 2739 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2740 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2741 enum pipe pipe = crtc->pipe; 2742 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2743 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2744 u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end; 2745 int vsyncshift = 0; 2746 2747 /* We need to be careful not to change the adjusted mode, for otherwise 2748 * the hw state checker will get angry at the mismatch.
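* Hence the code below only tweaks local copies of the vdisplay/vtotal/vblank values and leaves adjusted_mode itself untouched.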
*/ 2749 crtc_vdisplay = adjusted_mode->crtc_vdisplay; 2750 crtc_vtotal = adjusted_mode->crtc_vtotal; 2751 crtc_vblank_start = adjusted_mode->crtc_vblank_start; 2752 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 2753 2754 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 2755 /* the chip adds 2 halflines automatically */ 2756 crtc_vtotal -= 1; 2757 crtc_vblank_end -= 1; 2758 2759 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 2760 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 2761 else 2762 vsyncshift = adjusted_mode->crtc_hsync_start - 2763 adjusted_mode->crtc_htotal / 2; 2764 if (vsyncshift < 0) 2765 vsyncshift += adjusted_mode->crtc_htotal; 2766 } 2767 2768 /* 2769 * VBLANK_START no longer works on ADL+, instead we must use 2770 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start. 2771 */ 2772 if (DISPLAY_VER(dev_priv) >= 13) { 2773 intel_de_write(dev_priv, 2774 TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder), 2775 crtc_vblank_start - crtc_vdisplay); 2776 2777 /* 2778 * VBLANK_START not used by hw, just clear it 2779 * to make it stand out in register dumps. 2780 */ 2781 crtc_vblank_start = 1; 2782 } 2783 2784 if (DISPLAY_VER(dev_priv) >= 4) 2785 intel_de_write(dev_priv, 2786 TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder), 2787 vsyncshift); 2788 2789 intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder), 2790 HACTIVE(adjusted_mode->crtc_hdisplay - 1) | 2791 HTOTAL(adjusted_mode->crtc_htotal - 1)); 2792 intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder), 2793 HBLANK_START(adjusted_mode->crtc_hblank_start - 1) | 2794 HBLANK_END(adjusted_mode->crtc_hblank_end - 1)); 2795 intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder), 2796 HSYNC_START(adjusted_mode->crtc_hsync_start - 1) | 2797 HSYNC_END(adjusted_mode->crtc_hsync_end - 1)); 2798 2799 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder), 2800 VACTIVE(crtc_vdisplay - 1) | 2801 VTOTAL(crtc_vtotal - 1)); 2802 intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder), 2803 VBLANK_START(crtc_vblank_start - 1) | 2804 VBLANK_END(crtc_vblank_end - 1)); 2805 intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder), 2806 VSYNC_START(adjusted_mode->crtc_vsync_start - 1) | 2807 VSYNC_END(adjusted_mode->crtc_vsync_end - 1)); 2808 2809 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 2810 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 2811 * documented on the DDI_FUNC_CTL register description, EDP Input Select 2812 * bits. 
*/ 2813 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 2814 (pipe == PIPE_B || pipe == PIPE_C)) 2815 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, pipe), 2816 VACTIVE(crtc_vdisplay - 1) | 2817 VTOTAL(crtc_vtotal - 1)); 2818 } 2819 2820 static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state) 2821 { 2822 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2823 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2824 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2825 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2826 u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end; 2827 2828 crtc_vdisplay = adjusted_mode->crtc_vdisplay; 2829 crtc_vtotal = adjusted_mode->crtc_vtotal; 2830 crtc_vblank_start = adjusted_mode->crtc_vblank_start; 2831 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 2832 2833 drm_WARN_ON(&dev_priv->drm, adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE); 2834 2835 /* 2836 * The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode. 2837 * But let's write it anyway to keep the state checker happy. 2838 */ 2839 intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder), 2840 VBLANK_START(crtc_vblank_start - 1) | 2841 VBLANK_END(crtc_vblank_end - 1)); 2842 /* 2843 * The double buffer latch point for TRANS_VTOTAL 2844 * is the transcoder's undelayed vblank. 2845 */ 2846 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder), 2847 VACTIVE(crtc_vdisplay - 1) | 2848 VTOTAL(crtc_vtotal - 1)); 2849 } 2850 2851 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 2852 { 2853 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2854 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2855 int width = drm_rect_width(&crtc_state->pipe_src); 2856 int height = drm_rect_height(&crtc_state->pipe_src); 2857 enum pipe pipe = crtc->pipe; 2858 2859 /* pipesrc controls the size that is scaled from, which should 2860 * always be the user's requested size. 
2861 */ 2862 intel_de_write(dev_priv, PIPESRC(dev_priv, pipe), 2863 PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1)); 2864 } 2865 2866 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) 2867 { 2868 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 2869 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2870 2871 if (DISPLAY_VER(dev_priv) == 2) 2872 return false; 2873 2874 if (DISPLAY_VER(dev_priv) >= 9 || 2875 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2876 return intel_de_read(dev_priv, 2877 TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW; 2878 else 2879 return intel_de_read(dev_priv, 2880 TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK; 2881 } 2882 2883 static void intel_get_transcoder_timings(struct intel_crtc *crtc, 2884 struct intel_crtc_state *pipe_config) 2885 { 2886 struct drm_device *dev = crtc->base.dev; 2887 struct drm_i915_private *dev_priv = to_i915(dev); 2888 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 2889 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2890 u32 tmp; 2891 2892 tmp = intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder)); 2893 adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1; 2894 adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1; 2895 2896 if (!transcoder_is_dsi(cpu_transcoder)) { 2897 tmp = intel_de_read(dev_priv, 2898 TRANS_HBLANK(dev_priv, cpu_transcoder)); 2899 adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1; 2900 adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1; 2901 } 2902 2903 tmp = intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder)); 2904 adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1; 2905 adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1; 2906 2907 tmp = intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder)); 2908 adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1; 2909 adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1; 2910 2911 /* FIXME TGL+ DSI transcoders have this! 
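* (i.e. skipping the TRANS_VBLANK readout below is too strict on TGL+ DSI)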
*/ 2912 if (!transcoder_is_dsi(cpu_transcoder)) { 2913 tmp = intel_de_read(dev_priv, 2914 TRANS_VBLANK(dev_priv, cpu_transcoder)); 2915 adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1; 2916 adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1; 2917 } 2918 tmp = intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder)); 2919 adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1; 2920 adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1; 2921 2922 if (intel_pipe_is_interlaced(pipe_config)) { 2923 adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE; 2924 adjusted_mode->crtc_vtotal += 1; 2925 adjusted_mode->crtc_vblank_end += 1; 2926 } 2927 2928 if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder)) 2929 adjusted_mode->crtc_vblank_start = 2930 adjusted_mode->crtc_vdisplay + 2931 intel_de_read(dev_priv, 2932 TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder)); 2933 } 2934 2935 static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state) 2936 { 2937 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2938 int num_pipes = intel_crtc_num_joined_pipes(crtc_state); 2939 enum pipe primary_pipe, pipe = crtc->pipe; 2940 int width; 2941 2942 if (num_pipes == 1) 2943 return; 2944 2945 primary_pipe = joiner_primary_pipe(crtc_state); 2946 width = drm_rect_width(&crtc_state->pipe_src); 2947 2948 drm_rect_translate_to(&crtc_state->pipe_src, 2949 (pipe - primary_pipe) * width, 0); 2950 } 2951 2952 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 2953 struct intel_crtc_state *pipe_config) 2954 { 2955 struct drm_device *dev = crtc->base.dev; 2956 struct drm_i915_private *dev_priv = to_i915(dev); 2957 u32 tmp; 2958 2959 tmp = intel_de_read(dev_priv, PIPESRC(dev_priv, crtc->pipe)); 2960 2961 drm_rect_init(&pipe_config->pipe_src, 0, 0, 2962 REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1, 2963 REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1); 2964 2965 intel_joiner_adjust_pipe_src(pipe_config); 2966 } 2967 2968 void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 2969 { 2970 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2971 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2972 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2973 u32 val = 0; 2974 2975 /* 2976 * - We keep both pipes enabled on 830 2977 * - During modeset the pipe is still disabled and must remain so 2978 * - During fastset the pipe is already enabled and must remain so 2979 */ 2980 if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state)) 2981 val |= TRANSCONF_ENABLE; 2982 2983 if (crtc_state->double_wide) 2984 val |= TRANSCONF_DOUBLE_WIDE; 2985 2986 /* only g4x and later have fancy bpc/dither controls */ 2987 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2988 IS_CHERRYVIEW(dev_priv)) { 2989 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 2990 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 2991 val |= TRANSCONF_DITHER_EN | 2992 TRANSCONF_DITHER_TYPE_SP; 2993 2994 switch (crtc_state->pipe_bpp) { 2995 default: 2996 /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ 2997 MISSING_CASE(crtc_state->pipe_bpp); 2998 fallthrough; 2999 case 18: 3000 val |= TRANSCONF_BPC_6; 3001 break; 3002 case 24: 3003 val |= TRANSCONF_BPC_8; 3004 break; 3005 case 30: 3006 val |= TRANSCONF_BPC_10; 3007 break; 3008 } 3009 } 3010 3011 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 3012 if (DISPLAY_VER(dev_priv) < 4 || 3013 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3014 val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION; 3015 else 3016 val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT; 3017 } else { 3018 val |= TRANSCONF_INTERLACE_PROGRESSIVE; 3019 } 3020 3021 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 3022 crtc_state->limited_color_range) 3023 val |= TRANSCONF_COLOR_RANGE_SELECT; 3024 3025 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 3026 3027 if (crtc_state->wgc_enable) 3028 val |= TRANSCONF_WGC_ENABLE; 3029 3030 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3031 3032 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val); 3033 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder)); 3034 } 3035 3036 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 3037 { 3038 if (IS_I830(dev_priv)) 3039 return false; 3040 3041 return DISPLAY_VER(dev_priv) >= 4 || 3042 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 3043 } 3044 3045 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) 3046 { 3047 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3048 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3049 enum pipe pipe; 3050 u32 tmp; 3051 3052 if (!i9xx_has_pfit(dev_priv)) 3053 return; 3054 3055 tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)); 3056 if (!(tmp & PFIT_ENABLE)) 3057 return; 3058 3059 /* Check whether the pfit is attached to our pipe. 
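* (on pre-gen4 hardware the pfit is hardwired to pipe B)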
*/ 3060 if (DISPLAY_VER(dev_priv) >= 4) 3061 pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp); 3062 else 3063 pipe = PIPE_B; 3064 3065 if (pipe != crtc->pipe) 3066 return; 3067 3068 crtc_state->gmch_pfit.control = tmp; 3069 crtc_state->gmch_pfit.pgm_ratios = 3070 intel_de_read(dev_priv, PFIT_PGM_RATIOS(dev_priv)); 3071 } 3072 3073 static enum intel_output_format 3074 bdw_get_pipe_misc_output_format(struct intel_crtc *crtc) 3075 { 3076 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3077 u32 tmp; 3078 3079 tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); 3080 3081 if (tmp & PIPE_MISC_YUV420_ENABLE) { 3082 /* We support 4:2:0 in full blend mode only */ 3083 drm_WARN_ON(&dev_priv->drm, 3084 (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0); 3085 3086 return INTEL_OUTPUT_FORMAT_YCBCR420; 3087 } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) { 3088 return INTEL_OUTPUT_FORMAT_YCBCR444; 3089 } else { 3090 return INTEL_OUTPUT_FORMAT_RGB; 3091 } 3092 } 3093 3094 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 3095 struct intel_crtc_state *pipe_config) 3096 { 3097 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3098 enum intel_display_power_domain power_domain; 3099 intel_wakeref_t wakeref; 3100 u32 tmp; 3101 bool ret; 3102 3103 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 3104 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 3105 if (!wakeref) 3106 return false; 3107 3108 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3109 pipe_config->sink_format = pipe_config->output_format; 3110 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 3111 pipe_config->shared_dpll = NULL; 3112 3113 ret = false; 3114 3115 tmp = intel_de_read(dev_priv, 3116 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 3117 if (!(tmp & TRANSCONF_ENABLE)) 3118 goto out; 3119 3120 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 3121 IS_CHERRYVIEW(dev_priv)) { 3122 switch (tmp & TRANSCONF_BPC_MASK) { 3123 case TRANSCONF_BPC_6: 3124 pipe_config->pipe_bpp = 18; 3125 break; 3126 case TRANSCONF_BPC_8: 3127 pipe_config->pipe_bpp = 24; 3128 break; 3129 case TRANSCONF_BPC_10: 3130 pipe_config->pipe_bpp = 30; 3131 break; 3132 default: 3133 MISSING_CASE(tmp); 3134 break; 3135 } 3136 } 3137 3138 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 3139 (tmp & TRANSCONF_COLOR_RANGE_SELECT)) 3140 pipe_config->limited_color_range = true; 3141 3142 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp); 3143 3144 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 3145 3146 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 3147 (tmp & TRANSCONF_WGC_ENABLE)) 3148 pipe_config->wgc_enable = true; 3149 3150 intel_color_get_config(pipe_config); 3151 3152 if (DISPLAY_VER(dev_priv) < 4) 3153 pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE; 3154 3155 intel_get_transcoder_timings(crtc, pipe_config); 3156 intel_get_pipe_src_size(crtc, pipe_config); 3157 3158 i9xx_get_pfit_config(pipe_config); 3159 3160 i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state); 3161 3162 if (DISPLAY_VER(dev_priv) >= 4) { 3163 tmp = pipe_config->dpll_hw_state.i9xx.dpll_md; 3164 pipe_config->pixel_multiplier = 3165 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 3166 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 3167 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 3168 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 3169 tmp = pipe_config->dpll_hw_state.i9xx.dpll; 3170 pipe_config->pixel_multiplier = 3171 ((tmp & SDVO_MULTIPLIER_MASK) 
3172 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 3173 } else { 3174 /* Note that on i915G/GM the pixel multiplier is in the sdvo 3175 * port and will be fixed up in the encoder->get_config 3176 * function. */ 3177 pipe_config->pixel_multiplier = 1; 3178 } 3179 3180 if (IS_CHERRYVIEW(dev_priv)) 3181 chv_crtc_clock_get(pipe_config); 3182 else if (IS_VALLEYVIEW(dev_priv)) 3183 vlv_crtc_clock_get(pipe_config); 3184 else 3185 i9xx_crtc_clock_get(pipe_config); 3186 3187 /* 3188 * Normally the dotclock is filled in by the encoder .get_config() 3189 * but in case the pipe is enabled w/o any ports we need a sane 3190 * default. 3191 */ 3192 pipe_config->hw.adjusted_mode.crtc_clock = 3193 pipe_config->port_clock / pipe_config->pixel_multiplier; 3194 3195 ret = true; 3196 3197 out: 3198 intel_display_power_put(dev_priv, power_domain, wakeref); 3199 3200 return ret; 3201 } 3202 3203 void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) 3204 { 3205 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3206 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3207 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3208 u32 val = 0; 3209 3210 /* 3211 * - During modeset the pipe is still disabled and must remain so 3212 * - During fastset the pipe is already enabled and must remain so 3213 */ 3214 if (!intel_crtc_needs_modeset(crtc_state)) 3215 val |= TRANSCONF_ENABLE; 3216 3217 switch (crtc_state->pipe_bpp) { 3218 default: 3219 /* Case prevented by intel_choose_pipe_bpp_dither. */ 3220 MISSING_CASE(crtc_state->pipe_bpp); 3221 fallthrough; 3222 case 18: 3223 val |= TRANSCONF_BPC_6; 3224 break; 3225 case 24: 3226 val |= TRANSCONF_BPC_8; 3227 break; 3228 case 30: 3229 val |= TRANSCONF_BPC_10; 3230 break; 3231 case 36: 3232 val |= TRANSCONF_BPC_12; 3233 break; 3234 } 3235 3236 if (crtc_state->dither) 3237 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3238 3239 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3240 val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3241 else 3242 val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3243 3244 /* 3245 * This would end up with an odd purple hue over 3246 * the entire display. Make sure we don't do it. 
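* (presumably the RGB-style range compression would then be applied on top of the already limited-range YCbCr values produced by the CSC)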
3247 */ 3248 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && 3249 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 3250 3251 if (crtc_state->limited_color_range && 3252 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3253 val |= TRANSCONF_COLOR_RANGE_SELECT; 3254 3255 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3256 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709; 3257 3258 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 3259 3260 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3261 val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); 3262 3263 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val); 3264 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder)); 3265 } 3266 3267 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) 3268 { 3269 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3270 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3271 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3272 u32 val = 0; 3273 3274 /* 3275 * - During modeset the pipe is still disabled and must remain so 3276 * - During fastset the pipe is already enabled and must remain so 3277 */ 3278 if (!intel_crtc_needs_modeset(crtc_state)) 3279 val |= TRANSCONF_ENABLE; 3280 3281 if (IS_HASWELL(dev_priv) && crtc_state->dither) 3282 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3283 3284 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3285 val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3286 else 3287 val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3288 3289 if (IS_HASWELL(dev_priv) && 3290 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3291 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW; 3292 3293 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val); 3294 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder)); 3295 } 3296 3297 static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state) 3298 { 3299 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3300 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3301 u32 val = 0; 3302 3303 switch (crtc_state->pipe_bpp) { 3304 case 18: 3305 val |= PIPE_MISC_BPC_6; 3306 break; 3307 case 24: 3308 val |= PIPE_MISC_BPC_8; 3309 break; 3310 case 30: 3311 val |= PIPE_MISC_BPC_10; 3312 break; 3313 case 36: 3314 /* Port output 12BPC defined for ADLP+ */ 3315 if (DISPLAY_VER(dev_priv) >= 13) 3316 val |= PIPE_MISC_BPC_12_ADLP; 3317 break; 3318 default: 3319 MISSING_CASE(crtc_state->pipe_bpp); 3320 break; 3321 } 3322 3323 if (crtc_state->dither) 3324 val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP; 3325 3326 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 3327 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 3328 val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV; 3329 3330 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 3331 val |= PIPE_MISC_YUV420_ENABLE | 3332 PIPE_MISC_YUV420_MODE_FULL_BLEND; 3333 3334 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state)) 3335 val |= PIPE_MISC_HDR_MODE_PRECISION; 3336 3337 if (DISPLAY_VER(dev_priv) >= 12) 3338 val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC; 3339 3340 /* allow PSR with sprite enabled */ 3341 if (IS_BROADWELL(dev_priv)) 3342 val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE; 3343 3344 intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val); 3345 } 3346 3347 int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) 3348 { 3349 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 
3350 u32 tmp; 3351 3352 tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); 3353 3354 switch (tmp & PIPE_MISC_BPC_MASK) { 3355 case PIPE_MISC_BPC_6: 3356 return 18; 3357 case PIPE_MISC_BPC_8: 3358 return 24; 3359 case PIPE_MISC_BPC_10: 3360 return 30; 3361 /* 3362 * PORT OUTPUT 12 BPC defined for ADLP+. 3363 * 3364 * TODO: 3365 * For previous platforms with DSI interface, bits 5:7 3366 * are used for storing pipe_bpp irrespective of dithering. 3367 * Since the value of 12 BPC is not defined for these bits 3368 * on older platforms, need to find a workaround for 12 BPC 3369 * MIPI DSI HW readout. 3370 */ 3371 case PIPE_MISC_BPC_12_ADLP: 3372 if (DISPLAY_VER(dev_priv) >= 13) 3373 return 36; 3374 fallthrough; 3375 default: 3376 MISSING_CASE(tmp); 3377 return 0; 3378 } 3379 } 3380 3381 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) 3382 { 3383 /* 3384 * Account for spread spectrum to avoid 3385 * oversubscribing the link. Max center spread 3386 * is 2.5%; use 5% for safety's sake. 3387 */ 3388 u32 bps = target_clock * bpp * 21 / 20; 3389 return DIV_ROUND_UP(bps, link_bw * 8); 3390 } 3391 3392 void intel_get_m_n(struct drm_i915_private *i915, 3393 struct intel_link_m_n *m_n, 3394 i915_reg_t data_m_reg, i915_reg_t data_n_reg, 3395 i915_reg_t link_m_reg, i915_reg_t link_n_reg) 3396 { 3397 m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK; 3398 m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK; 3399 m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK; 3400 m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK; 3401 m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1; 3402 } 3403 3404 void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, 3405 enum transcoder transcoder, 3406 struct intel_link_m_n *m_n) 3407 { 3408 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3409 enum pipe pipe = crtc->pipe; 3410 3411 if (DISPLAY_VER(dev_priv) >= 5) 3412 intel_get_m_n(dev_priv, m_n, 3413 PIPE_DATA_M1(dev_priv, transcoder), 3414 PIPE_DATA_N1(dev_priv, transcoder), 3415 PIPE_LINK_M1(dev_priv, transcoder), 3416 PIPE_LINK_N1(dev_priv, transcoder)); 3417 else 3418 intel_get_m_n(dev_priv, m_n, 3419 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), 3420 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); 3421 } 3422 3423 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, 3424 enum transcoder transcoder, 3425 struct intel_link_m_n *m_n) 3426 { 3427 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3428 3429 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) 3430 return; 3431 3432 intel_get_m_n(dev_priv, m_n, 3433 PIPE_DATA_M2(dev_priv, transcoder), 3434 PIPE_DATA_N2(dev_priv, transcoder), 3435 PIPE_LINK_M2(dev_priv, transcoder), 3436 PIPE_LINK_N2(dev_priv, transcoder)); 3437 } 3438 3439 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state) 3440 { 3441 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3442 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3443 u32 ctl, pos, size; 3444 enum pipe pipe; 3445 3446 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe)); 3447 if ((ctl & PF_ENABLE) == 0) 3448 return; 3449 3450 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 3451 pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl); 3452 else 3453 pipe = crtc->pipe; 3454 3455 crtc_state->pch_pfit.enabled = true; 3456 3457 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe)); 3458 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe)); 3459 3460 
drm_rect_init(&crtc_state->pch_pfit.dst, 3461 REG_FIELD_GET(PF_WIN_XPOS_MASK, pos), 3462 REG_FIELD_GET(PF_WIN_YPOS_MASK, pos), 3463 REG_FIELD_GET(PF_WIN_XSIZE_MASK, size), 3464 REG_FIELD_GET(PF_WIN_YSIZE_MASK, size)); 3465 3466 /* 3467 * We currently do not free assignments of panel fitters on 3468 * ivb/hsw (since we don't use the higher upscaling modes which 3469 * differentiate them) so just WARN about this case for now. 3470 */ 3471 drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe); 3472 } 3473 3474 static bool ilk_get_pipe_config(struct intel_crtc *crtc, 3475 struct intel_crtc_state *pipe_config) 3476 { 3477 struct drm_device *dev = crtc->base.dev; 3478 struct drm_i915_private *dev_priv = to_i915(dev); 3479 enum intel_display_power_domain power_domain; 3480 intel_wakeref_t wakeref; 3481 u32 tmp; 3482 bool ret; 3483 3484 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 3485 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 3486 if (!wakeref) 3487 return false; 3488 3489 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 3490 pipe_config->shared_dpll = NULL; 3491 3492 ret = false; 3493 tmp = intel_de_read(dev_priv, 3494 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 3495 if (!(tmp & TRANSCONF_ENABLE)) 3496 goto out; 3497 3498 switch (tmp & TRANSCONF_BPC_MASK) { 3499 case TRANSCONF_BPC_6: 3500 pipe_config->pipe_bpp = 18; 3501 break; 3502 case TRANSCONF_BPC_8: 3503 pipe_config->pipe_bpp = 24; 3504 break; 3505 case TRANSCONF_BPC_10: 3506 pipe_config->pipe_bpp = 30; 3507 break; 3508 case TRANSCONF_BPC_12: 3509 pipe_config->pipe_bpp = 36; 3510 break; 3511 default: 3512 break; 3513 } 3514 3515 if (tmp & TRANSCONF_COLOR_RANGE_SELECT) 3516 pipe_config->limited_color_range = true; 3517 3518 switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) { 3519 case TRANSCONF_OUTPUT_COLORSPACE_YUV601: 3520 case TRANSCONF_OUTPUT_COLORSPACE_YUV709: 3521 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 3522 break; 3523 default: 3524 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3525 break; 3526 } 3527 3528 pipe_config->sink_format = pipe_config->output_format; 3529 3530 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp); 3531 3532 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 3533 3534 pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp); 3535 3536 intel_color_get_config(pipe_config); 3537 3538 pipe_config->pixel_multiplier = 1; 3539 3540 ilk_pch_get_config(pipe_config); 3541 3542 intel_get_transcoder_timings(crtc, pipe_config); 3543 intel_get_pipe_src_size(crtc, pipe_config); 3544 3545 ilk_get_pfit_config(pipe_config); 3546 3547 ret = true; 3548 3549 out: 3550 intel_display_power_put(dev_priv, power_domain, wakeref); 3551 3552 return ret; 3553 } 3554 3555 static u8 joiner_pipes(struct drm_i915_private *i915) 3556 { 3557 u8 pipes; 3558 3559 if (DISPLAY_VER(i915) >= 12) 3560 pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); 3561 else if (DISPLAY_VER(i915) >= 11) 3562 pipes = BIT(PIPE_B) | BIT(PIPE_C); 3563 else 3564 pipes = 0; 3565 3566 return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask; 3567 } 3568 3569 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, 3570 enum transcoder cpu_transcoder) 3571 { 3572 enum intel_display_power_domain power_domain; 3573 intel_wakeref_t wakeref; 3574 u32 tmp = 0; 3575 3576 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3577 3578 with_intel_display_power_if_enabled(dev_priv, power_domain,
wakeref) 3579 tmp = intel_de_read(dev_priv, 3580 TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)); 3581 3582 return tmp & TRANS_DDI_FUNC_ENABLE; 3583 } 3584 3585 static void enabled_joiner_pipes(struct drm_i915_private *dev_priv, 3586 u8 *primary_pipes, u8 *secondary_pipes) 3587 { 3588 struct intel_crtc *crtc; 3589 3590 *primary_pipes = 0; 3591 *secondary_pipes = 0; 3592 3593 if (!HAS_BIGJOINER(dev_priv)) 3594 return; 3595 3596 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, 3597 joiner_pipes(dev_priv)) { 3598 enum intel_display_power_domain power_domain; 3599 enum pipe pipe = crtc->pipe; 3600 intel_wakeref_t wakeref; 3601 3602 power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe); 3603 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { 3604 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); 3605 3606 if (!(tmp & BIG_JOINER_ENABLE)) 3607 continue; 3608 3609 if (tmp & PRIMARY_BIG_JOINER_ENABLE) 3610 *primary_pipes |= BIT(pipe); 3611 else 3612 *secondary_pipes |= BIT(pipe); 3613 } 3614 3615 if (!HAS_UNCOMPRESSED_JOINER(dev_priv)) 3616 continue; 3617 3618 power_domain = POWER_DOMAIN_PIPE(pipe); 3619 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { 3620 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); 3621 3622 if (tmp & UNCOMPRESSED_JOINER_PRIMARY) 3623 *primary_pipes |= BIT(pipe); 3624 if (tmp & UNCOMPRESSED_JOINER_SECONDARY) 3625 *secondary_pipes |= BIT(pipe); 3626 } 3627 } 3628 3629 /* Joiner pipes should always be consecutive primary and secondary */ 3630 drm_WARN(&dev_priv->drm, *secondary_pipes != *primary_pipes << 1, 3631 "Joiner misconfigured (primary pipes 0x%x, secondary pipes 0x%x)\n", 3632 *primary_pipes, *secondary_pipes); 3633 } 3634 3635 static enum pipe get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes, u8 secondary_pipes) 3636 { 3637 if ((secondary_pipes & BIT(pipe)) == 0) 3638 return pipe; 3639 3640 /* ignore everything above our pipe */ 3641 primary_pipes &= ~GENMASK(7, pipe); 3642 3643 /* highest remaining bit should be our primary pipe */ 3644 return fls(primary_pipes) - 1; 3645 } 3646 3647 static u8 get_joiner_secondary_pipes(enum pipe pipe, u8 primary_pipes, u8 secondary_pipes) 3648 { 3649 enum pipe primary_pipe, next_primary_pipe; 3650 3651 primary_pipe = get_joiner_primary_pipe(pipe, primary_pipes, secondary_pipes); 3652 3653 if ((primary_pipes & BIT(primary_pipe)) == 0) 3654 return 0; 3655 3656 /* ignore our primary pipe and everything below it */ 3657 primary_pipes &= ~GENMASK(primary_pipe, 0); 3658 /* make sure a high bit is set for the ffs() */ 3659 primary_pipes |= BIT(7); 3660 /* lowest remaining bit should be the next primary pipe */ 3661 next_primary_pipe = ffs(primary_pipes) - 1; 3662 3663 return secondary_pipes & GENMASK(next_primary_pipe - 1, primary_pipe); 3664 } 3665 3666 static u8 hsw_panel_transcoders(struct drm_i915_private *i915) 3667 { 3668 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP); 3669 3670 if (DISPLAY_VER(i915) >= 11) 3671 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 3672 3673 return panel_transcoder_mask; 3674 } 3675 3676 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) 3677 { 3678 struct drm_device *dev = crtc->base.dev; 3679 struct drm_i915_private *dev_priv = to_i915(dev); 3680 u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv); 3681 enum transcoder cpu_transcoder; 3682 u8 primary_pipes, secondary_pipes; 3683 u8 enabled_transcoders = 0; 3684 3685 /* 3686 * XXX: Do intel_display_power_get_if_enabled before 
reading this (for 3687 * consistency and less surprising code; it's in always on power). 3688 */ 3689 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, 3690 panel_transcoder_mask) { 3691 enum intel_display_power_domain power_domain; 3692 intel_wakeref_t wakeref; 3693 enum pipe trans_pipe; 3694 u32 tmp = 0; 3695 3696 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3697 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) 3698 tmp = intel_de_read(dev_priv, 3699 TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)); 3700 3701 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 3702 continue; 3703 3704 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 3705 default: 3706 drm_WARN(dev, 1, 3707 "unknown pipe linked to transcoder %s\n", 3708 transcoder_name(cpu_transcoder)); 3709 fallthrough; 3710 case TRANS_DDI_EDP_INPUT_A_ONOFF: 3711 case TRANS_DDI_EDP_INPUT_A_ON: 3712 trans_pipe = PIPE_A; 3713 break; 3714 case TRANS_DDI_EDP_INPUT_B_ONOFF: 3715 trans_pipe = PIPE_B; 3716 break; 3717 case TRANS_DDI_EDP_INPUT_C_ONOFF: 3718 trans_pipe = PIPE_C; 3719 break; 3720 case TRANS_DDI_EDP_INPUT_D_ONOFF: 3721 trans_pipe = PIPE_D; 3722 break; 3723 } 3724 3725 if (trans_pipe == crtc->pipe) 3726 enabled_transcoders |= BIT(cpu_transcoder); 3727 } 3728 3729 /* single pipe or joiner primary */ 3730 cpu_transcoder = (enum transcoder) crtc->pipe; 3731 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 3732 enabled_transcoders |= BIT(cpu_transcoder); 3733 3734 /* joiner secondary -> consider the primary pipe's transcoder as well */ 3735 enabled_joiner_pipes(dev_priv, &primary_pipes, &secondary_pipes); 3736 if (secondary_pipes & BIT(crtc->pipe)) { 3737 cpu_transcoder = (enum transcoder) 3738 get_joiner_primary_pipe(crtc->pipe, primary_pipes, secondary_pipes); 3739 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 3740 enabled_transcoders |= BIT(cpu_transcoder); 3741 } 3742 3743 return enabled_transcoders; 3744 } 3745 3746 static bool has_edp_transcoders(u8 enabled_transcoders) 3747 { 3748 return enabled_transcoders & BIT(TRANSCODER_EDP); 3749 } 3750 3751 static bool has_dsi_transcoders(u8 enabled_transcoders) 3752 { 3753 return enabled_transcoders & (BIT(TRANSCODER_DSI_0) | 3754 BIT(TRANSCODER_DSI_1)); 3755 } 3756 3757 static bool has_pipe_transcoders(u8 enabled_transcoders) 3758 { 3759 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) | 3760 BIT(TRANSCODER_DSI_0) | 3761 BIT(TRANSCODER_DSI_1)); 3762 } 3763 3764 static void assert_enabled_transcoders(struct drm_i915_private *i915, 3765 u8 enabled_transcoders) 3766 { 3767 /* Only one type of transcoder please */ 3768 drm_WARN_ON(&i915->drm, 3769 has_edp_transcoders(enabled_transcoders) + 3770 has_dsi_transcoders(enabled_transcoders) + 3771 has_pipe_transcoders(enabled_transcoders) > 1); 3772 3773 /* Only DSI transcoders can be ganged */ 3774 drm_WARN_ON(&i915->drm, 3775 !has_dsi_transcoders(enabled_transcoders) && 3776 !is_power_of_2(enabled_transcoders)); 3777 } 3778 3779 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 3780 struct intel_crtc_state *pipe_config, 3781 struct intel_display_power_domain_set *power_domain_set) 3782 { 3783 struct drm_device *dev = crtc->base.dev; 3784 struct drm_i915_private *dev_priv = to_i915(dev); 3785 unsigned long enabled_transcoders; 3786 u32 tmp; 3787 3788 enabled_transcoders = hsw_enabled_transcoders(crtc); 3789 if (!enabled_transcoders) 3790 return false; 3791 3792 assert_enabled_transcoders(dev_priv, enabled_transcoders); 3793 3794 /* 3795 * With the exception of DSI we should only ever have 3796 
* a single enabled transcoder. With DSI let's just 3797 * pick the first one. 3798 */ 3799 pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1; 3800 3801 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 3802 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 3803 return false; 3804 3805 if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) { 3806 tmp = intel_de_read(dev_priv, 3807 TRANS_DDI_FUNC_CTL(dev_priv, pipe_config->cpu_transcoder)); 3808 3809 if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF) 3810 pipe_config->pch_pfit.force_thru = true; 3811 } 3812 3813 tmp = intel_de_read(dev_priv, 3814 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 3815 3816 return tmp & TRANSCONF_ENABLE; 3817 } 3818 3819 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 3820 struct intel_crtc_state *pipe_config, 3821 struct intel_display_power_domain_set *power_domain_set) 3822 { 3823 struct intel_display *display = to_intel_display(crtc); 3824 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3825 enum transcoder cpu_transcoder; 3826 enum port port; 3827 u32 tmp; 3828 3829 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 3830 if (port == PORT_A) 3831 cpu_transcoder = TRANSCODER_DSI_A; 3832 else 3833 cpu_transcoder = TRANSCODER_DSI_C; 3834 3835 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 3836 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) 3837 continue; 3838 3839 /* 3840 * The PLL needs to be enabled with a valid divider 3841 * configuration, otherwise accessing DSI registers will hang 3842 * the machine. See BSpec North Display Engine 3843 * registers/MIPI[BXT]. We can break out here early, since we 3844 * need the same DSI PLL to be enabled for both DSI ports. 
3845 */ 3846 if (!bxt_dsi_pll_is_enabled(dev_priv)) 3847 break; 3848 3849 /* XXX: this works for video mode only */ 3850 tmp = intel_de_read(display, BXT_MIPI_PORT_CTRL(port)); 3851 if (!(tmp & DPI_ENABLE)) 3852 continue; 3853 3854 tmp = intel_de_read(display, MIPI_CTRL(display, port)); 3855 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 3856 continue; 3857 3858 pipe_config->cpu_transcoder = cpu_transcoder; 3859 break; 3860 } 3861 3862 return transcoder_is_dsi(pipe_config->cpu_transcoder); 3863 } 3864 3865 static void intel_joiner_get_config(struct intel_crtc_state *crtc_state) 3866 { 3867 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3868 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 3869 u8 primary_pipes, secondary_pipes; 3870 enum pipe pipe = crtc->pipe; 3871 3872 enabled_joiner_pipes(i915, &primary_pipes, &secondary_pipes); 3873 3874 if (((primary_pipes | secondary_pipes) & BIT(pipe)) == 0) 3875 return; 3876 3877 crtc_state->joiner_pipes = 3878 BIT(get_joiner_primary_pipe(pipe, primary_pipes, secondary_pipes)) | 3879 get_joiner_secondary_pipes(pipe, primary_pipes, secondary_pipes); 3880 } 3881 3882 static bool hsw_get_pipe_config(struct intel_crtc *crtc, 3883 struct intel_crtc_state *pipe_config) 3884 { 3885 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3886 bool active; 3887 u32 tmp; 3888 3889 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains, 3890 POWER_DOMAIN_PIPE(crtc->pipe))) 3891 return false; 3892 3893 pipe_config->shared_dpll = NULL; 3894 3895 active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains); 3896 3897 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 3898 bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) { 3899 drm_WARN_ON(&dev_priv->drm, active); 3900 active = true; 3901 } 3902 3903 if (!active) 3904 goto out; 3905 3906 intel_joiner_get_config(pipe_config); 3907 intel_dsc_get_config(pipe_config); 3908 3909 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 3910 DISPLAY_VER(dev_priv) >= 11) 3911 intel_get_transcoder_timings(crtc, pipe_config); 3912 3913 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) 3914 intel_vrr_get_config(pipe_config); 3915 3916 intel_get_pipe_src_size(crtc, pipe_config); 3917 3918 if (IS_HASWELL(dev_priv)) { 3919 u32 tmp = intel_de_read(dev_priv, 3920 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 3921 3922 if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW) 3923 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 3924 else 3925 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3926 } else { 3927 pipe_config->output_format = 3928 bdw_get_pipe_misc_output_format(crtc); 3929 } 3930 3931 pipe_config->sink_format = pipe_config->output_format; 3932 3933 intel_color_get_config(pipe_config); 3934 3935 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe)); 3936 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp); 3937 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 3938 pipe_config->ips_linetime = 3939 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); 3940 3941 if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains, 3942 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) { 3943 if (DISPLAY_VER(dev_priv) >= 9) 3944 skl_scaler_get_config(pipe_config); 3945 else 3946 ilk_get_pfit_config(pipe_config); 3947 } 3948 3949 hsw_ips_get_config(pipe_config); 3950 3951 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 3952 
!transcoder_is_dsi(pipe_config->cpu_transcoder)) { 3953 pipe_config->pixel_multiplier = 3954 intel_de_read(dev_priv, 3955 TRANS_MULT(dev_priv, pipe_config->cpu_transcoder)) + 1; 3956 } else { 3957 pipe_config->pixel_multiplier = 1; 3958 } 3959 3960 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { 3961 tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder)); 3962 3963 pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; 3964 } else { 3965 /* no idea if this is correct */ 3966 pipe_config->framestart_delay = 1; 3967 } 3968 3969 out: 3970 intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains); 3971 3972 return active; 3973 } 3974 3975 bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) 3976 { 3977 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3978 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 3979 3980 if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state)) 3981 return false; 3982 3983 crtc_state->hw.active = true; 3984 3985 intel_crtc_readout_derived_state(crtc_state); 3986 3987 return true; 3988 } 3989 3990 int intel_dotclock_calculate(int link_freq, 3991 const struct intel_link_m_n *m_n) 3992 { 3993 /* 3994 * The calculation for the data clock -> pixel clock is: 3995 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 3996 * But we want to avoid losing precision if possible, so: 3997 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 3998 * 3999 * and for link freq (10 kbit/s units) -> pixel clock it is: 4000 * link_symbol_clock = link_freq * 10 / link_symbol_size 4001 * pixel_clock = (m * link_symbol_clock) / n 4002 * or for more precision: 4003 * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size) 4004 */ 4005 4006 if (!m_n->link_n) 4007 return 0; 4008 4009 return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10), 4010 m_n->link_n * intel_dp_link_symbol_size(link_freq)); 4011 } 4012 4013 int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) 4014 { 4015 int dotclock; 4016 4017 if (intel_crtc_has_dp_encoder(pipe_config)) 4018 dotclock = intel_dotclock_calculate(pipe_config->port_clock, 4019 &pipe_config->dp_m_n); 4020 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) 4021 dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24, 4022 pipe_config->pipe_bpp); 4023 else 4024 dotclock = pipe_config->port_clock; 4025 4026 if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && 4027 !intel_crtc_has_dp_encoder(pipe_config)) 4028 dotclock *= 2; 4029 4030 if (pipe_config->pixel_multiplier) 4031 dotclock /= pipe_config->pixel_multiplier; 4032 4033 return dotclock; 4034 } 4035 4036 /* Returns the currently programmed mode of the given encoder.
*/ 4037 struct drm_display_mode * 4038 intel_encoder_current_mode(struct intel_encoder *encoder) 4039 { 4040 struct intel_display *display = to_intel_display(encoder); 4041 struct intel_crtc_state *crtc_state; 4042 struct drm_display_mode *mode; 4043 struct intel_crtc *crtc; 4044 enum pipe pipe; 4045 4046 if (!encoder->get_hw_state(encoder, &pipe)) 4047 return NULL; 4048 4049 crtc = intel_crtc_for_pipe(display, pipe); 4050 4051 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 4052 if (!mode) 4053 return NULL; 4054 4055 crtc_state = intel_crtc_state_alloc(crtc); 4056 if (!crtc_state) { 4057 kfree(mode); 4058 return NULL; 4059 } 4060 4061 if (!intel_crtc_get_pipe_config(crtc_state)) { 4062 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); 4063 kfree(mode); 4064 return NULL; 4065 } 4066 4067 intel_encoder_get_config(encoder, crtc_state); 4068 4069 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode); 4070 4071 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); 4072 4073 return mode; 4074 } 4075 4076 static bool encoders_cloneable(const struct intel_encoder *a, 4077 const struct intel_encoder *b) 4078 { 4079 /* masks could be asymmetric, so check both ways */ 4080 return a == b || (a->cloneable & BIT(b->type) && 4081 b->cloneable & BIT(a->type)); 4082 } 4083 4084 static bool check_single_encoder_cloning(struct intel_atomic_state *state, 4085 struct intel_crtc *crtc, 4086 struct intel_encoder *encoder) 4087 { 4088 struct intel_encoder *source_encoder; 4089 struct drm_connector *connector; 4090 struct drm_connector_state *connector_state; 4091 int i; 4092 4093 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4094 if (connector_state->crtc != &crtc->base) 4095 continue; 4096 4097 source_encoder = 4098 to_intel_encoder(connector_state->best_encoder); 4099 if (!encoders_cloneable(encoder, source_encoder)) 4100 return false; 4101 } 4102 4103 return true; 4104 } 4105 4106 static int icl_add_linked_planes(struct intel_atomic_state *state) 4107 { 4108 struct intel_plane *plane, *linked; 4109 struct intel_plane_state *plane_state, *linked_plane_state; 4110 int i; 4111 4112 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4113 linked = plane_state->planar_linked_plane; 4114 4115 if (!linked) 4116 continue; 4117 4118 linked_plane_state = intel_atomic_get_plane_state(state, linked); 4119 if (IS_ERR(linked_plane_state)) 4120 return PTR_ERR(linked_plane_state); 4121 4122 drm_WARN_ON(state->base.dev, 4123 linked_plane_state->planar_linked_plane != plane); 4124 drm_WARN_ON(state->base.dev, 4125 linked_plane_state->planar_slave == plane_state->planar_slave); 4126 } 4127 4128 return 0; 4129 } 4130 4131 static int icl_check_nv12_planes(struct intel_atomic_state *state, 4132 struct intel_crtc *crtc) 4133 { 4134 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 4135 struct intel_crtc_state *crtc_state = 4136 intel_atomic_get_new_crtc_state(state, crtc); 4137 struct intel_plane *plane, *linked; 4138 struct intel_plane_state *plane_state; 4139 int i; 4140 4141 if (DISPLAY_VER(dev_priv) < 11) 4142 return 0; 4143 4144 /* 4145 * Destroy all old plane links and make the slave plane invisible 4146 * in the crtc_state->active_planes mask. 
4147 */ 4148 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4149 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) 4150 continue; 4151 4152 plane_state->planar_linked_plane = NULL; 4153 if (plane_state->planar_slave && !plane_state->uapi.visible) { 4154 crtc_state->enabled_planes &= ~BIT(plane->id); 4155 crtc_state->active_planes &= ~BIT(plane->id); 4156 crtc_state->update_planes |= BIT(plane->id); 4157 crtc_state->data_rate[plane->id] = 0; 4158 crtc_state->rel_data_rate[plane->id] = 0; 4159 } 4160 4161 plane_state->planar_slave = false; 4162 } 4163 4164 if (!crtc_state->nv12_planes) 4165 return 0; 4166 4167 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4168 struct intel_plane_state *linked_state = NULL; 4169 4170 if (plane->pipe != crtc->pipe || 4171 !(crtc_state->nv12_planes & BIT(plane->id))) 4172 continue; 4173 4174 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 4175 if (!icl_is_nv12_y_plane(dev_priv, linked->id)) 4176 continue; 4177 4178 if (crtc_state->active_planes & BIT(linked->id)) 4179 continue; 4180 4181 linked_state = intel_atomic_get_plane_state(state, linked); 4182 if (IS_ERR(linked_state)) 4183 return PTR_ERR(linked_state); 4184 4185 break; 4186 } 4187 4188 if (!linked_state) { 4189 drm_dbg_kms(&dev_priv->drm, 4190 "Need %d free Y planes for planar YUV\n", 4191 hweight8(crtc_state->nv12_planes)); 4192 4193 return -EINVAL; 4194 } 4195 4196 plane_state->planar_linked_plane = linked; 4197 4198 linked_state->planar_slave = true; 4199 linked_state->planar_linked_plane = plane; 4200 crtc_state->enabled_planes |= BIT(linked->id); 4201 crtc_state->active_planes |= BIT(linked->id); 4202 crtc_state->update_planes |= BIT(linked->id); 4203 crtc_state->data_rate[linked->id] = 4204 crtc_state->data_rate_y[plane->id]; 4205 crtc_state->rel_data_rate[linked->id] = 4206 crtc_state->rel_data_rate_y[plane->id]; 4207 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n", 4208 linked->base.name, plane->base.name); 4209 4210 /* Copy parameters to slave plane */ 4211 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; 4212 linked_state->color_ctl = plane_state->color_ctl; 4213 linked_state->view = plane_state->view; 4214 linked_state->decrypt = plane_state->decrypt; 4215 4216 intel_plane_copy_hw_state(linked_state, plane_state); 4217 linked_state->uapi.src = plane_state->uapi.src; 4218 linked_state->uapi.dst = plane_state->uapi.dst; 4219 4220 if (icl_is_hdr_plane(dev_priv, plane->id)) { 4221 if (linked->id == PLANE_7) 4222 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL; 4223 else if (linked->id == PLANE_6) 4224 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL; 4225 else if (linked->id == PLANE_5) 4226 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL; 4227 else if (linked->id == PLANE_4) 4228 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL; 4229 else 4230 MISSING_CASE(linked->id); 4231 } 4232 } 4233 4234 return 0; 4235 } 4236 4237 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) 4238 { 4239 const struct drm_display_mode *pipe_mode = 4240 &crtc_state->hw.pipe_mode; 4241 int linetime_wm; 4242 4243 if (!crtc_state->hw.enable) 4244 return 0; 4245 4246 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4247 pipe_mode->crtc_clock); 4248 4249 return min(linetime_wm, 0x1ff); 4250 } 4251 4252 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, 4253 const struct intel_cdclk_state *cdclk_state) 4254 { 4255 const struct drm_display_mode *pipe_mode = 4256 
&crtc_state->hw.pipe_mode; 4257 int linetime_wm; 4258 4259 if (!crtc_state->hw.enable) 4260 return 0; 4261 4262 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4263 cdclk_state->logical.cdclk); 4264 4265 return min(linetime_wm, 0x1ff); 4266 } 4267 4268 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) 4269 { 4270 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4271 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4272 const struct drm_display_mode *pipe_mode = 4273 &crtc_state->hw.pipe_mode; 4274 int linetime_wm; 4275 4276 if (!crtc_state->hw.enable) 4277 return 0; 4278 4279 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8, 4280 crtc_state->pixel_rate); 4281 4282 /* Display WA #1135: BXT:ALL GLK:ALL */ 4283 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 4284 skl_watermark_ipc_enabled(dev_priv)) 4285 linetime_wm /= 2; 4286 4287 return min(linetime_wm, 0x1ff); 4288 } 4289 4290 static int hsw_compute_linetime_wm(struct intel_atomic_state *state, 4291 struct intel_crtc *crtc) 4292 { 4293 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4294 struct intel_crtc_state *crtc_state = 4295 intel_atomic_get_new_crtc_state(state, crtc); 4296 const struct intel_cdclk_state *cdclk_state; 4297 4298 if (DISPLAY_VER(dev_priv) >= 9) 4299 crtc_state->linetime = skl_linetime_wm(crtc_state); 4300 else 4301 crtc_state->linetime = hsw_linetime_wm(crtc_state); 4302 4303 if (!hsw_crtc_supports_ips(crtc)) 4304 return 0; 4305 4306 cdclk_state = intel_atomic_get_cdclk_state(state); 4307 if (IS_ERR(cdclk_state)) 4308 return PTR_ERR(cdclk_state); 4309 4310 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state, 4311 cdclk_state); 4312 4313 return 0; 4314 } 4315 4316 static int intel_crtc_atomic_check(struct intel_atomic_state *state, 4317 struct intel_crtc *crtc) 4318 { 4319 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4320 struct intel_crtc_state *crtc_state = 4321 intel_atomic_get_new_crtc_state(state, crtc); 4322 int ret; 4323 4324 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) && 4325 intel_crtc_needs_modeset(crtc_state) && 4326 !crtc_state->hw.active) 4327 crtc_state->update_wm_post = true; 4328 4329 if (intel_crtc_needs_modeset(crtc_state)) { 4330 ret = intel_dpll_crtc_get_shared_dpll(state, crtc); 4331 if (ret) 4332 return ret; 4333 } 4334 4335 ret = intel_color_check(state, crtc); 4336 if (ret) 4337 return ret; 4338 4339 ret = intel_compute_pipe_wm(state, crtc); 4340 if (ret) { 4341 drm_dbg_kms(&dev_priv->drm, 4342 "Target pipe watermarks are invalid\n"); 4343 return ret; 4344 } 4345 4346 /* 4347 * Calculate 'intermediate' watermarks that satisfy both the 4348 * old state and the new state. We can program these 4349 * immediately. 
4350 */ 4351 ret = intel_compute_intermediate_wm(state, crtc); 4352 if (ret) { 4353 drm_dbg_kms(&dev_priv->drm, 4354 "No valid intermediate pipe watermarks are possible\n"); 4355 return ret; 4356 } 4357 4358 if (DISPLAY_VER(dev_priv) >= 9) { 4359 if (intel_crtc_needs_modeset(crtc_state) || 4360 intel_crtc_needs_fastset(crtc_state)) { 4361 ret = skl_update_scaler_crtc(crtc_state); 4362 if (ret) 4363 return ret; 4364 } 4365 4366 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state); 4367 if (ret) 4368 return ret; 4369 } 4370 4371 if (HAS_IPS(dev_priv)) { 4372 ret = hsw_ips_compute_config(state, crtc); 4373 if (ret) 4374 return ret; 4375 } 4376 4377 if (DISPLAY_VER(dev_priv) >= 9 || 4378 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 4379 ret = hsw_compute_linetime_wm(state, crtc); 4380 if (ret) 4381 return ret; 4382 4383 } 4384 4385 ret = intel_psr2_sel_fetch_update(state, crtc); 4386 if (ret) 4387 return ret; 4388 4389 return 0; 4390 } 4391 4392 static int 4393 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 4394 struct intel_crtc_state *crtc_state) 4395 { 4396 struct drm_connector *connector = conn_state->connector; 4397 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 4398 const struct drm_display_info *info = &connector->display_info; 4399 int bpp; 4400 4401 switch (conn_state->max_bpc) { 4402 case 6 ... 7: 4403 bpp = 6 * 3; 4404 break; 4405 case 8 ... 9: 4406 bpp = 8 * 3; 4407 break; 4408 case 10 ... 11: 4409 bpp = 10 * 3; 4410 break; 4411 case 12 ... 16: 4412 bpp = 12 * 3; 4413 break; 4414 default: 4415 MISSING_CASE(conn_state->max_bpc); 4416 return -EINVAL; 4417 } 4418 4419 if (bpp < crtc_state->pipe_bpp) { 4420 drm_dbg_kms(&i915->drm, 4421 "[CONNECTOR:%d:%s] Limiting display bpp to %d " 4422 "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n", 4423 connector->base.id, connector->name, 4424 bpp, 3 * info->bpc, 4425 3 * conn_state->max_requested_bpc, 4426 crtc_state->pipe_bpp); 4427 4428 crtc_state->pipe_bpp = bpp; 4429 } 4430 4431 return 0; 4432 } 4433 4434 static int 4435 compute_baseline_pipe_bpp(struct intel_atomic_state *state, 4436 struct intel_crtc *crtc) 4437 { 4438 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4439 struct intel_crtc_state *crtc_state = 4440 intel_atomic_get_new_crtc_state(state, crtc); 4441 struct drm_connector *connector; 4442 struct drm_connector_state *connector_state; 4443 int bpp, i; 4444 4445 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 4446 IS_CHERRYVIEW(dev_priv))) 4447 bpp = 10*3; 4448 else if (DISPLAY_VER(dev_priv) >= 5) 4449 bpp = 12*3; 4450 else 4451 bpp = 8*3; 4452 4453 crtc_state->pipe_bpp = bpp; 4454 4455 /* Clamp display bpp to connector max bpp */ 4456 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4457 int ret; 4458 4459 if (connector_state->crtc != &crtc->base) 4460 continue; 4461 4462 ret = compute_sink_pipe_bpp(connector_state, crtc_state); 4463 if (ret) 4464 return ret; 4465 } 4466 4467 return 0; 4468 } 4469 4470 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 4471 { 4472 struct drm_device *dev = state->base.dev; 4473 struct drm_connector *connector; 4474 struct drm_connector_list_iter conn_iter; 4475 unsigned int used_ports = 0; 4476 unsigned int used_mst_ports = 0; 4477 bool ret = true; 4478 4479 /* 4480 * We're going to peek into connector->state, 4481 * hence connection_mutex must be held. 
4482 */ 4483 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); 4484 4485 /* 4486 * Walk the connector list instead of the encoder 4487 * list to detect the problem on ddi platforms 4488 * where there's just one encoder per digital port. 4489 */ 4490 drm_connector_list_iter_begin(dev, &conn_iter); 4491 drm_for_each_connector_iter(connector, &conn_iter) { 4492 struct drm_connector_state *connector_state; 4493 struct intel_encoder *encoder; 4494 4495 connector_state = 4496 drm_atomic_get_new_connector_state(&state->base, 4497 connector); 4498 if (!connector_state) 4499 connector_state = connector->state; 4500 4501 if (!connector_state->best_encoder) 4502 continue; 4503 4504 encoder = to_intel_encoder(connector_state->best_encoder); 4505 4506 drm_WARN_ON(dev, !connector_state->crtc); 4507 4508 switch (encoder->type) { 4509 case INTEL_OUTPUT_DDI: 4510 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) 4511 break; 4512 fallthrough; 4513 case INTEL_OUTPUT_DP: 4514 case INTEL_OUTPUT_HDMI: 4515 case INTEL_OUTPUT_EDP: 4516 /* the same port mustn't appear more than once */ 4517 if (used_ports & BIT(encoder->port)) 4518 ret = false; 4519 4520 used_ports |= BIT(encoder->port); 4521 break; 4522 case INTEL_OUTPUT_DP_MST: 4523 used_mst_ports |= 4524 1 << encoder->port; 4525 break; 4526 default: 4527 break; 4528 } 4529 } 4530 drm_connector_list_iter_end(&conn_iter); 4531 4532 /* can't mix MST and SST/HDMI on the same port */ 4533 if (used_ports & used_mst_ports) 4534 return false; 4535 4536 return ret; 4537 } 4538 4539 static void 4540 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, 4541 struct intel_crtc *crtc) 4542 { 4543 struct intel_crtc_state *crtc_state = 4544 intel_atomic_get_new_crtc_state(state, crtc); 4545 4546 WARN_ON(intel_crtc_is_joiner_secondary(crtc_state)); 4547 4548 drm_property_replace_blob(&crtc_state->hw.degamma_lut, 4549 crtc_state->uapi.degamma_lut); 4550 drm_property_replace_blob(&crtc_state->hw.gamma_lut, 4551 crtc_state->uapi.gamma_lut); 4552 drm_property_replace_blob(&crtc_state->hw.ctm, 4553 crtc_state->uapi.ctm); 4554 } 4555 4556 static void 4557 intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state, 4558 struct intel_crtc *crtc) 4559 { 4560 struct intel_crtc_state *crtc_state = 4561 intel_atomic_get_new_crtc_state(state, crtc); 4562 4563 WARN_ON(intel_crtc_is_joiner_secondary(crtc_state)); 4564 4565 crtc_state->hw.enable = crtc_state->uapi.enable; 4566 crtc_state->hw.active = crtc_state->uapi.active; 4567 drm_mode_copy(&crtc_state->hw.mode, 4568 &crtc_state->uapi.mode); 4569 drm_mode_copy(&crtc_state->hw.adjusted_mode, 4570 &crtc_state->uapi.adjusted_mode); 4571 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter; 4572 4573 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 4574 } 4575 4576 static void 4577 copy_joiner_crtc_state_nomodeset(struct intel_atomic_state *state, 4578 struct intel_crtc *secondary_crtc) 4579 { 4580 struct intel_crtc_state *secondary_crtc_state = 4581 intel_atomic_get_new_crtc_state(state, secondary_crtc); 4582 struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state); 4583 const struct intel_crtc_state *primary_crtc_state = 4584 intel_atomic_get_new_crtc_state(state, primary_crtc); 4585 4586 drm_property_replace_blob(&secondary_crtc_state->hw.degamma_lut, 4587 primary_crtc_state->hw.degamma_lut); 4588 drm_property_replace_blob(&secondary_crtc_state->hw.gamma_lut, 4589 primary_crtc_state->hw.gamma_lut); 4590 
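/*
 * Joined pipes scan out a single image, so the secondary must mirror
 * the primary's color management blobs (including the CTM copied
 * below), otherwise the two halves of the frame would be processed
 * differently.
 */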
drm_property_replace_blob(&secondary_crtc_state->hw.ctm, 4591 primary_crtc_state->hw.ctm); 4592 4593 secondary_crtc_state->uapi.color_mgmt_changed = primary_crtc_state->uapi.color_mgmt_changed; 4594 } 4595 4596 static int 4597 copy_joiner_crtc_state_modeset(struct intel_atomic_state *state, 4598 struct intel_crtc *secondary_crtc) 4599 { 4600 struct intel_crtc_state *secondary_crtc_state = 4601 intel_atomic_get_new_crtc_state(state, secondary_crtc); 4602 struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state); 4603 const struct intel_crtc_state *primary_crtc_state = 4604 intel_atomic_get_new_crtc_state(state, primary_crtc); 4605 struct intel_crtc_state *saved_state; 4606 4607 WARN_ON(primary_crtc_state->joiner_pipes != 4608 secondary_crtc_state->joiner_pipes); 4609 4610 saved_state = kmemdup(primary_crtc_state, sizeof(*saved_state), GFP_KERNEL); 4611 if (!saved_state) 4612 return -ENOMEM; 4613 4614 /* preserve some things from the slave's original crtc state */ 4615 saved_state->uapi = secondary_crtc_state->uapi; 4616 saved_state->scaler_state = secondary_crtc_state->scaler_state; 4617 saved_state->shared_dpll = secondary_crtc_state->shared_dpll; 4618 saved_state->crc_enabled = secondary_crtc_state->crc_enabled; 4619 4620 intel_crtc_free_hw_state(secondary_crtc_state); 4621 if (secondary_crtc_state->dp_tunnel_ref.tunnel) 4622 drm_dp_tunnel_ref_put(&secondary_crtc_state->dp_tunnel_ref); 4623 memcpy(secondary_crtc_state, saved_state, sizeof(*secondary_crtc_state)); 4624 kfree(saved_state); 4625 4626 /* Re-init hw state */ 4627 memset(&secondary_crtc_state->hw, 0, sizeof(secondary_crtc_state->hw)); 4628 secondary_crtc_state->hw.enable = primary_crtc_state->hw.enable; 4629 secondary_crtc_state->hw.active = primary_crtc_state->hw.active; 4630 drm_mode_copy(&secondary_crtc_state->hw.mode, 4631 &primary_crtc_state->hw.mode); 4632 drm_mode_copy(&secondary_crtc_state->hw.pipe_mode, 4633 &primary_crtc_state->hw.pipe_mode); 4634 drm_mode_copy(&secondary_crtc_state->hw.adjusted_mode, 4635 &primary_crtc_state->hw.adjusted_mode); 4636 secondary_crtc_state->hw.scaling_filter = primary_crtc_state->hw.scaling_filter; 4637 4638 if (primary_crtc_state->dp_tunnel_ref.tunnel) 4639 drm_dp_tunnel_ref_get(primary_crtc_state->dp_tunnel_ref.tunnel, 4640 &secondary_crtc_state->dp_tunnel_ref); 4641 4642 copy_joiner_crtc_state_nomodeset(state, secondary_crtc); 4643 4644 secondary_crtc_state->uapi.mode_changed = primary_crtc_state->uapi.mode_changed; 4645 secondary_crtc_state->uapi.connectors_changed = primary_crtc_state->uapi.connectors_changed; 4646 secondary_crtc_state->uapi.active_changed = primary_crtc_state->uapi.active_changed; 4647 4648 WARN_ON(primary_crtc_state->joiner_pipes != 4649 secondary_crtc_state->joiner_pipes); 4650 4651 return 0; 4652 } 4653 4654 static int 4655 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, 4656 struct intel_crtc *crtc) 4657 { 4658 struct intel_crtc_state *crtc_state = 4659 intel_atomic_get_new_crtc_state(state, crtc); 4660 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4661 struct intel_crtc_state *saved_state; 4662 4663 saved_state = intel_crtc_state_alloc(crtc); 4664 if (!saved_state) 4665 return -ENOMEM; 4666 4667 /* free the old crtc_state->hw members */ 4668 intel_crtc_free_hw_state(crtc_state); 4669 4670 intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state); 4671 4672 /* FIXME: before the switch to atomic started, a new pipe_config was 4673 * kzalloc'd. 
Code that depends on any field being zero should be 4674 * fixed, so that the crtc_state can be safely duplicated. For now, 4675 * only fields that are known not to cause problems are preserved. */ 4676 4677 saved_state->uapi = crtc_state->uapi; 4678 saved_state->inherited = crtc_state->inherited; 4679 saved_state->scaler_state = crtc_state->scaler_state; 4680 saved_state->shared_dpll = crtc_state->shared_dpll; 4681 saved_state->dpll_hw_state = crtc_state->dpll_hw_state; 4682 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, 4683 sizeof(saved_state->icl_port_dplls)); 4684 saved_state->crc_enabled = crtc_state->crc_enabled; 4685 if (IS_G4X(dev_priv) || 4686 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4687 saved_state->wm = crtc_state->wm; 4688 4689 memcpy(crtc_state, saved_state, sizeof(*crtc_state)); 4690 kfree(saved_state); 4691 4692 intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc); 4693 4694 return 0; 4695 } 4696 4697 static int 4698 intel_modeset_pipe_config(struct intel_atomic_state *state, 4699 struct intel_crtc *crtc, 4700 const struct intel_link_bw_limits *limits) 4701 { 4702 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4703 struct intel_crtc_state *crtc_state = 4704 intel_atomic_get_new_crtc_state(state, crtc); 4705 struct drm_connector *connector; 4706 struct drm_connector_state *connector_state; 4707 int pipe_src_w, pipe_src_h; 4708 int base_bpp, ret, i; 4709 4710 crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe; 4711 4712 crtc_state->framestart_delay = 1; 4713 4714 /* 4715 * Sanitize sync polarity flags based on requested ones. If neither 4716 * positive nor negative polarity is requested, treat this as meaning 4717 * negative polarity. 4718 */ 4719 if (!(crtc_state->hw.adjusted_mode.flags & 4720 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 4721 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 4722 4723 if (!(crtc_state->hw.adjusted_mode.flags & 4724 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 4725 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 4726 4727 ret = compute_baseline_pipe_bpp(state, crtc); 4728 if (ret) 4729 return ret; 4730 4731 crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe); 4732 crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe]; 4733 4734 if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) { 4735 drm_dbg_kms(&i915->drm, 4736 "[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n", 4737 crtc->base.base.id, crtc->base.name, 4738 FXP_Q4_ARGS(crtc_state->max_link_bpp_x16)); 4739 crtc_state->bw_constrained = true; 4740 } 4741 4742 base_bpp = crtc_state->pipe_bpp; 4743 4744 /* 4745 * Determine the real pipe dimensions. Note that stereo modes can 4746 * increase the actual pipe size due to the frame doubling and 4747 * insertion of additional space for blanks between the frames. This 4748 * is stored in the crtc timings. We use the requested mode to do this 4749 * computation to clearly distinguish it from the adjusted mode, which 4750 * can be changed by the connectors in the below retry loop.
4751 */ 4752 drm_mode_get_hv_timing(&crtc_state->hw.mode, 4753 &pipe_src_w, &pipe_src_h); 4754 drm_rect_init(&crtc_state->pipe_src, 0, 0, 4755 pipe_src_w, pipe_src_h); 4756 4757 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4758 struct intel_encoder *encoder = 4759 to_intel_encoder(connector_state->best_encoder); 4760 4761 if (connector_state->crtc != &crtc->base) 4762 continue; 4763 4764 if (!check_single_encoder_cloning(state, crtc, encoder)) { 4765 drm_dbg_kms(&i915->drm, 4766 "[ENCODER:%d:%s] rejecting invalid cloning configuration\n", 4767 encoder->base.base.id, encoder->base.name); 4768 return -EINVAL; 4769 } 4770 4771 /* 4772 * Determine output_types before calling the .compute_config() 4773 * hooks so that the hooks can use this information safely. 4774 */ 4775 if (encoder->compute_output_type) 4776 crtc_state->output_types |= 4777 BIT(encoder->compute_output_type(encoder, crtc_state, 4778 connector_state)); 4779 else 4780 crtc_state->output_types |= BIT(encoder->type); 4781 } 4782 4783 /* Ensure the port clock defaults are reset when retrying. */ 4784 crtc_state->port_clock = 0; 4785 crtc_state->pixel_multiplier = 1; 4786 4787 /* Fill in default crtc timings, allow encoders to overwrite them. */ 4788 drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode, 4789 CRTC_STEREO_DOUBLE); 4790 4791 /* Pass our mode to the connectors and the CRTC to give them a chance to 4792 * adjust it according to limitations or connector properties, and also 4793 * a chance to reject the mode entirely. 4794 */ 4795 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4796 struct intel_encoder *encoder = 4797 to_intel_encoder(connector_state->best_encoder); 4798 4799 if (connector_state->crtc != &crtc->base) 4800 continue; 4801 4802 ret = encoder->compute_config(encoder, crtc_state, 4803 connector_state); 4804 if (ret == -EDEADLK) 4805 return ret; 4806 if (ret < 0) { 4807 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n", 4808 encoder->base.base.id, encoder->base.name, ret); 4809 return ret; 4810 } 4811 } 4812 4813 /* Set default port clock if not overwritten by the encoder. Needs to be 4814 * done afterwards in case the encoder adjusts the mode. */ 4815 if (!crtc_state->port_clock) 4816 crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock 4817 * crtc_state->pixel_multiplier; 4818 4819 ret = intel_crtc_compute_config(state, crtc); 4820 if (ret == -EDEADLK) 4821 return ret; 4822 if (ret < 0) { 4823 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n", 4824 crtc->base.base.id, crtc->base.name, ret); 4825 return ret; 4826 } 4827 4828 /* Dithering seems not to pass through bits correctly when it should, so 4829 * only enable it on 6bpc panels and when it's not a compliance 4830 * test requesting 6bpc video pattern.
4831 */ 4832 crtc_state->dither = (crtc_state->pipe_bpp == 6*3) && 4833 !crtc_state->dither_force_disable; 4834 drm_dbg_kms(&i915->drm, 4835 "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 4836 crtc->base.base.id, crtc->base.name, 4837 base_bpp, crtc_state->pipe_bpp, crtc_state->dither); 4838 4839 return 0; 4840 } 4841 4842 static int 4843 intel_modeset_pipe_config_late(struct intel_atomic_state *state, 4844 struct intel_crtc *crtc) 4845 { 4846 struct intel_crtc_state *crtc_state = 4847 intel_atomic_get_new_crtc_state(state, crtc); 4848 struct drm_connector_state *conn_state; 4849 struct drm_connector *connector; 4850 int i; 4851 4852 for_each_new_connector_in_state(&state->base, connector, 4853 conn_state, i) { 4854 struct intel_encoder *encoder = 4855 to_intel_encoder(conn_state->best_encoder); 4856 int ret; 4857 4858 if (conn_state->crtc != &crtc->base || 4859 !encoder->compute_config_late) 4860 continue; 4861 4862 ret = encoder->compute_config_late(encoder, crtc_state, 4863 conn_state); 4864 if (ret) 4865 return ret; 4866 } 4867 4868 return 0; 4869 } 4870 4871 bool intel_fuzzy_clock_check(int clock1, int clock2) 4872 { 4873 int diff; 4874 4875 if (clock1 == clock2) 4876 return true; 4877 4878 if (!clock1 || !clock2) 4879 return false; 4880 4881 diff = abs(clock1 - clock2); 4882 4883 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 4884 return true; 4885 4886 return false; 4887 } 4888 4889 static bool 4890 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 4891 const struct intel_link_m_n *m2_n2) 4892 { 4893 return m_n->tu == m2_n2->tu && 4894 m_n->data_m == m2_n2->data_m && 4895 m_n->data_n == m2_n2->data_n && 4896 m_n->link_m == m2_n2->link_m && 4897 m_n->link_n == m2_n2->link_n; 4898 } 4899 4900 static bool 4901 intel_compare_infoframe(const union hdmi_infoframe *a, 4902 const union hdmi_infoframe *b) 4903 { 4904 return memcmp(a, b, sizeof(*a)) == 0; 4905 } 4906 4907 static bool 4908 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a, 4909 const struct drm_dp_vsc_sdp *b) 4910 { 4911 return a->pixelformat == b->pixelformat && 4912 a->colorimetry == b->colorimetry && 4913 a->bpc == b->bpc && 4914 a->dynamic_range == b->dynamic_range && 4915 a->content_type == b->content_type; 4916 } 4917 4918 static bool 4919 intel_compare_dp_as_sdp(const struct drm_dp_as_sdp *a, 4920 const struct drm_dp_as_sdp *b) 4921 { 4922 return a->vtotal == b->vtotal && 4923 a->target_rr == b->target_rr && 4924 a->duration_incr_ms == b->duration_incr_ms && 4925 a->duration_decr_ms == b->duration_decr_ms && 4926 a->mode == b->mode; 4927 } 4928 4929 static bool 4930 intel_compare_buffer(const u8 *a, const u8 *b, size_t len) 4931 { 4932 return memcmp(a, b, len) == 0; 4933 } 4934 4935 static void __printf(5, 6) 4936 pipe_config_mismatch(struct drm_printer *p, bool fastset, 4937 const struct intel_crtc *crtc, 4938 const char *name, const char *format, ...) 
4939 { 4940 struct va_format vaf; 4941 va_list args; 4942 4943 va_start(args, format); 4944 vaf.fmt = format; 4945 vaf.va = &args; 4946 4947 if (fastset) 4948 drm_printf(p, "[CRTC:%d:%s] fastset requirement not met in %s %pV\n", 4949 crtc->base.base.id, crtc->base.name, name, &vaf); 4950 else 4951 drm_printf(p, "[CRTC:%d:%s] mismatch in %s %pV\n", 4952 crtc->base.base.id, crtc->base.name, name, &vaf); 4953 4954 va_end(args); 4955 } 4956 4957 static void 4958 pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset, 4959 const struct intel_crtc *crtc, 4960 const char *name, 4961 const union hdmi_infoframe *a, 4962 const union hdmi_infoframe *b) 4963 { 4964 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4965 const char *loglevel; 4966 4967 if (fastset) { 4968 if (!drm_debug_enabled(DRM_UT_KMS)) 4969 return; 4970 4971 loglevel = KERN_DEBUG; 4972 } else { 4973 loglevel = KERN_ERR; 4974 } 4975 4976 pipe_config_mismatch(p, fastset, crtc, name, "infoframe"); 4977 4978 drm_printf(p, "expected:\n"); 4979 hdmi_infoframe_log(loglevel, i915->drm.dev, a); 4980 drm_printf(p, "found:\n"); 4981 hdmi_infoframe_log(loglevel, i915->drm.dev, b); 4982 } 4983 4984 static void 4985 pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset, 4986 const struct intel_crtc *crtc, 4987 const char *name, 4988 const struct drm_dp_vsc_sdp *a, 4989 const struct drm_dp_vsc_sdp *b) 4990 { 4991 pipe_config_mismatch(p, fastset, crtc, name, "dp sdp"); 4992 4993 drm_printf(p, "expected:\n"); 4994 drm_dp_vsc_sdp_log(p, a); 4995 drm_printf(p, "found:\n"); 4996 drm_dp_vsc_sdp_log(p, b); 4997 } 4998 4999 static void 5000 pipe_config_dp_as_sdp_mismatch(struct drm_i915_private *i915, 5001 bool fastset, const char *name, 5002 const struct drm_dp_as_sdp *a, 5003 const struct drm_dp_as_sdp *b) 5004 { 5005 struct drm_printer p; 5006 5007 if (fastset) { 5008 p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL); 5009 5010 drm_printf(&p, "fastset requirement not met in %s dp sdp\n", name); 5011 } else { 5012 p = drm_err_printer(&i915->drm, NULL); 5013 5014 drm_printf(&p, "mismatch in %s dp sdp\n", name); 5015 } 5016 5017 drm_printf(&p, "expected:\n"); 5018 drm_dp_as_sdp_log(&p, a); 5019 drm_printf(&p, "found:\n"); 5020 drm_dp_as_sdp_log(&p, b); 5021 } 5022 5023 /* Returns the length up to and including the last differing byte */ 5024 static size_t 5025 memcmp_diff_len(const u8 *a, const u8 *b, size_t len) 5026 { 5027 int i; 5028 5029 for (i = len - 1; i >= 0; i--) { 5030 if (a[i] != b[i]) 5031 return i + 1; 5032 } 5033 5034 return 0; 5035 } 5036 5037 static void 5038 pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset, 5039 const struct intel_crtc *crtc, 5040 const char *name, 5041 const u8 *a, const u8 *b, size_t len) 5042 { 5043 const char *loglevel; 5044 5045 if (fastset) { 5046 if (!drm_debug_enabled(DRM_UT_KMS)) 5047 return; 5048 5049 loglevel = KERN_DEBUG; 5050 } else { 5051 loglevel = KERN_ERR; 5052 } 5053 5054 pipe_config_mismatch(p, fastset, crtc, name, "buffer"); 5055 5056 /* only dump up to the last difference */ 5057 len = memcmp_diff_len(a, b, len); 5058 5059 print_hex_dump(loglevel, "expected: ", DUMP_PREFIX_NONE, 5060 16, 0, a, len, false); 5061 print_hex_dump(loglevel, "found: ", DUMP_PREFIX_NONE, 5062 16, 0, b, len, false); 5063 } 5064 5065 static void 5066 pipe_config_pll_mismatch(struct drm_printer *p, bool fastset, 5067 const struct intel_crtc *crtc, 5068 const char *name, 5069 const struct intel_dpll_hw_state *a, 5070 const struct intel_dpll_hw_state *b) 5071 { 5072 struct 
drm_i915_private *i915 = to_i915(crtc->base.dev); 5073 5074 pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */ 5075 5076 drm_printf(p, "expected:\n"); 5077 intel_dpll_dump_hw_state(i915, p, a); 5078 drm_printf(p, "found:\n"); 5079 intel_dpll_dump_hw_state(i915, p, b); 5080 } 5081 5082 static void 5083 pipe_config_cx0pll_mismatch(struct drm_printer *p, bool fastset, 5084 const struct intel_crtc *crtc, 5085 const char *name, 5086 const struct intel_cx0pll_state *a, 5087 const struct intel_cx0pll_state *b) 5088 { 5089 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 5090 char *chipname = a->use_c10 ? "C10" : "C20"; 5091 5092 pipe_config_mismatch(p, fastset, crtc, name, chipname); 5093 5094 drm_printf(p, "expected:\n"); 5095 intel_cx0pll_dump_hw_state(i915, a); 5096 drm_printf(p, "found:\n"); 5097 intel_cx0pll_dump_hw_state(i915, b); 5098 } 5099 5100 bool 5101 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 5102 const struct intel_crtc_state *pipe_config, 5103 bool fastset) 5104 { 5105 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); 5106 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 5107 struct drm_printer p; 5108 bool ret = true; 5109 5110 if (fastset) 5111 p = drm_dbg_printer(&dev_priv->drm, DRM_UT_KMS, NULL); 5112 else 5113 p = drm_err_printer(&dev_priv->drm, NULL); 5114 5115 #define PIPE_CONF_CHECK_X(name) do { \ 5116 if (current_config->name != pipe_config->name) { \ 5117 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ 5118 __stringify(name) " is bool"); \ 5119 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5120 "(expected 0x%08x, found 0x%08x)", \ 5121 current_config->name, \ 5122 pipe_config->name); \ 5123 ret = false; \ 5124 } \ 5125 } while (0) 5126 5127 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \ 5128 if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \ 5129 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ 5130 __stringify(name) " is bool"); \ 5131 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5132 "(expected 0x%08x, found 0x%08x)", \ 5133 current_config->name & (mask), \ 5134 pipe_config->name & (mask)); \ 5135 ret = false; \ 5136 } \ 5137 } while (0) 5138 5139 #define PIPE_CONF_CHECK_I(name) do { \ 5140 if (current_config->name != pipe_config->name) { \ 5141 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ 5142 __stringify(name) " is bool"); \ 5143 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5144 "(expected %i, found %i)", \ 5145 current_config->name, \ 5146 pipe_config->name); \ 5147 ret = false; \ 5148 } \ 5149 } while (0) 5150 5151 #define PIPE_CONF_CHECK_LLI(name) do { \ 5152 if (current_config->name != pipe_config->name) { \ 5153 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5154 "(expected %lli, found %lli)", \ 5155 current_config->name, \ 5156 pipe_config->name); \ 5157 ret = false; \ 5158 } \ 5159 } while (0) 5160 5161 #define PIPE_CONF_CHECK_BOOL(name) do { \ 5162 if (current_config->name != pipe_config->name) { \ 5163 BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \ 5164 __stringify(name) " is not bool"); \ 5165 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5166 "(expected %s, found %s)", \ 5167 str_yes_no(current_config->name), \ 5168 str_yes_no(pipe_config->name)); \ 5169 ret = false; \ 5170 } \ 5171 } while (0) 5172 5173 #define PIPE_CONF_CHECK_P(name) do { \ 5174 if (current_config->name != 
pipe_config->name) { \ 5175 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5176 "(expected %p, found %p)", \ 5177 current_config->name, \ 5178 pipe_config->name); \ 5179 ret = false; \ 5180 } \ 5181 } while (0) 5182 5183 #define PIPE_CONF_CHECK_M_N(name) do { \ 5184 if (!intel_compare_link_m_n(&current_config->name, \ 5185 &pipe_config->name)) { \ 5186 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5187 "(expected tu %i data %i/%i link %i/%i, " \ 5188 "found tu %i, data %i/%i link %i/%i)", \ 5189 current_config->name.tu, \ 5190 current_config->name.data_m, \ 5191 current_config->name.data_n, \ 5192 current_config->name.link_m, \ 5193 current_config->name.link_n, \ 5194 pipe_config->name.tu, \ 5195 pipe_config->name.data_m, \ 5196 pipe_config->name.data_n, \ 5197 pipe_config->name.link_m, \ 5198 pipe_config->name.link_n); \ 5199 ret = false; \ 5200 } \ 5201 } while (0) 5202 5203 #define PIPE_CONF_CHECK_PLL(name) do { \ 5204 if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \ 5205 &pipe_config->name)) { \ 5206 pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \ 5207 &current_config->name, \ 5208 &pipe_config->name); \ 5209 ret = false; \ 5210 } \ 5211 } while (0) 5212 5213 #define PIPE_CONF_CHECK_PLL_CX0(name) do { \ 5214 if (!intel_cx0pll_compare_hw_state(&current_config->name, \ 5215 &pipe_config->name)) { \ 5216 pipe_config_cx0pll_mismatch(&p, fastset, crtc, __stringify(name), \ 5217 &current_config->name, \ 5218 &pipe_config->name); \ 5219 ret = false; \ 5220 } \ 5221 } while (0) 5222 5223 #define PIPE_CONF_CHECK_TIMINGS(name) do { \ 5224 PIPE_CONF_CHECK_I(name.crtc_hdisplay); \ 5225 PIPE_CONF_CHECK_I(name.crtc_htotal); \ 5226 PIPE_CONF_CHECK_I(name.crtc_hblank_start); \ 5227 PIPE_CONF_CHECK_I(name.crtc_hblank_end); \ 5228 PIPE_CONF_CHECK_I(name.crtc_hsync_start); \ 5229 PIPE_CONF_CHECK_I(name.crtc_hsync_end); \ 5230 PIPE_CONF_CHECK_I(name.crtc_vdisplay); \ 5231 PIPE_CONF_CHECK_I(name.crtc_vblank_start); \ 5232 PIPE_CONF_CHECK_I(name.crtc_vsync_start); \ 5233 PIPE_CONF_CHECK_I(name.crtc_vsync_end); \ 5234 if (!fastset || !pipe_config->update_lrr) { \ 5235 PIPE_CONF_CHECK_I(name.crtc_vtotal); \ 5236 PIPE_CONF_CHECK_I(name.crtc_vblank_end); \ 5237 } \ 5238 } while (0) 5239 5240 #define PIPE_CONF_CHECK_RECT(name) do { \ 5241 PIPE_CONF_CHECK_I(name.x1); \ 5242 PIPE_CONF_CHECK_I(name.x2); \ 5243 PIPE_CONF_CHECK_I(name.y1); \ 5244 PIPE_CONF_CHECK_I(name.y2); \ 5245 } while (0) 5246 5247 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ 5248 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 5249 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5250 "(%x) (expected %i, found %i)", \ 5251 (mask), \ 5252 current_config->name & (mask), \ 5253 pipe_config->name & (mask)); \ 5254 ret = false; \ 5255 } \ 5256 } while (0) 5257 5258 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ 5259 if (!intel_compare_infoframe(&current_config->infoframes.name, \ 5260 &pipe_config->infoframes.name)) { \ 5261 pipe_config_infoframe_mismatch(&p, fastset, crtc, __stringify(name), \ 5262 &current_config->infoframes.name, \ 5263 &pipe_config->infoframes.name); \ 5264 ret = false; \ 5265 } \ 5266 } while (0) 5267 5268 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \ 5269 if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \ 5270 &pipe_config->infoframes.name)) { \ 5271 pipe_config_dp_vsc_sdp_mismatch(&p, fastset, crtc, __stringify(name), \ 5272 &current_config->infoframes.name, \ 5273 &pipe_config->infoframes.name); \ 5274 ret = false; \ 5275 } \ 5276 } while (0) 5277 5278 #define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \ 5279 if (!intel_compare_dp_as_sdp(&current_config->infoframes.name, \ 5280 &pipe_config->infoframes.name)) { \ 5281 pipe_config_dp_as_sdp_mismatch(dev_priv, fastset, __stringify(name), \ 5282 &current_config->infoframes.name, \ 5283 &pipe_config->infoframes.name); \ 5284 ret = false; \ 5285 } \ 5286 } while (0) 5287 5288 #define PIPE_CONF_CHECK_BUFFER(name, len) do { \ 5289 BUILD_BUG_ON(sizeof(current_config->name) != (len)); \ 5290 BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \ 5291 if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \ 5292 pipe_config_buffer_mismatch(&p, fastset, crtc, __stringify(name), \ 5293 current_config->name, \ 5294 pipe_config->name, \ 5295 (len)); \ 5296 ret = false; \ 5297 } \ 5298 } while (0) 5299 5300 #define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \ 5301 if (current_config->gamma_mode == pipe_config->gamma_mode && \ 5302 !intel_color_lut_equal(current_config, \ 5303 current_config->lut, pipe_config->lut, \ 5304 is_pre_csc_lut)) { \ 5305 pipe_config_mismatch(&p, fastset, crtc, __stringify(lut), \ 5306 "hw_state doesn't match sw_state"); \ 5307 ret = false; \ 5308 } \ 5309 } while (0) 5310 5311 #define PIPE_CONF_CHECK_CSC(name) do { \ 5312 PIPE_CONF_CHECK_X(name.preoff[0]); \ 5313 PIPE_CONF_CHECK_X(name.preoff[1]); \ 5314 PIPE_CONF_CHECK_X(name.preoff[2]); \ 5315 PIPE_CONF_CHECK_X(name.coeff[0]); \ 5316 PIPE_CONF_CHECK_X(name.coeff[1]); \ 5317 PIPE_CONF_CHECK_X(name.coeff[2]); \ 5318 PIPE_CONF_CHECK_X(name.coeff[3]); \ 5319 PIPE_CONF_CHECK_X(name.coeff[4]); \ 5320 PIPE_CONF_CHECK_X(name.coeff[5]); \ 5321 PIPE_CONF_CHECK_X(name.coeff[6]); \ 5322 PIPE_CONF_CHECK_X(name.coeff[7]); \ 5323 PIPE_CONF_CHECK_X(name.coeff[8]); \ 5324 PIPE_CONF_CHECK_X(name.postoff[0]); \ 5325 PIPE_CONF_CHECK_X(name.postoff[1]); \ 5326 PIPE_CONF_CHECK_X(name.postoff[2]); \ 5327 } while (0) 5328 5329 #define PIPE_CONF_QUIRK(quirk) \ 5330 ((current_config->quirks | pipe_config->quirks) & (quirk)) 5331 5332 PIPE_CONF_CHECK_BOOL(hw.enable); 5333 PIPE_CONF_CHECK_BOOL(hw.active); 5334 5335 PIPE_CONF_CHECK_I(cpu_transcoder); 5336 PIPE_CONF_CHECK_I(mst_master_transcoder); 5337 5338 PIPE_CONF_CHECK_BOOL(has_pch_encoder); 5339 PIPE_CONF_CHECK_I(fdi_lanes); 5340 PIPE_CONF_CHECK_M_N(fdi_m_n); 5341 5342 PIPE_CONF_CHECK_I(lane_count); 5343 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 5344 5345 if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) { 5346 if (!fastset || !pipe_config->update_m_n) 5347 PIPE_CONF_CHECK_M_N(dp_m_n); 5348 } else { 5349 PIPE_CONF_CHECK_M_N(dp_m_n); 5350 PIPE_CONF_CHECK_M_N(dp_m2_n2); 5351 } 5352 5353 PIPE_CONF_CHECK_X(output_types); 5354 5355 PIPE_CONF_CHECK_I(framestart_delay); 5356 PIPE_CONF_CHECK_I(msa_timing_delay); 5357 5358 PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode); 5359 PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode); 5360 5361 PIPE_CONF_CHECK_I(pixel_multiplier); 5362 5363 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5364 DRM_MODE_FLAG_INTERLACE); 5365 5366 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 5367 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5368 DRM_MODE_FLAG_PHSYNC); 5369 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5370 DRM_MODE_FLAG_NHSYNC); 5371 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5372 DRM_MODE_FLAG_PVSYNC); 5373 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5374 DRM_MODE_FLAG_NVSYNC); 5375 } 5376 5377 PIPE_CONF_CHECK_I(output_format); 5378 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 5379 if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 5380 IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) 5381 PIPE_CONF_CHECK_BOOL(limited_color_range); 5382 5383 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 5384 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 5385 PIPE_CONF_CHECK_BOOL(has_infoframe); 5386 PIPE_CONF_CHECK_BOOL(enhanced_framing); 5387 PIPE_CONF_CHECK_BOOL(fec_enable); 5388 5389 if (!fastset) { 5390 PIPE_CONF_CHECK_BOOL(has_audio); 5391 PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES); 5392 } 5393 5394 PIPE_CONF_CHECK_X(gmch_pfit.control); 5395 /* pfit ratios are autocomputed by the hw on gen4+ */ 5396 if (DISPLAY_VER(dev_priv) < 4) 5397 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 5398 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 5399 5400 /* 5401 * Changing the EDP transcoder input mux 5402 * (A_ONOFF vs. A_ON) requires a full modeset. 5403 */ 5404 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 5405 5406 if (!fastset) { 5407 PIPE_CONF_CHECK_RECT(pipe_src); 5408 5409 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 5410 PIPE_CONF_CHECK_RECT(pch_pfit.dst); 5411 5412 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 5413 PIPE_CONF_CHECK_I(pixel_rate); 5414 5415 PIPE_CONF_CHECK_X(gamma_mode); 5416 if (IS_CHERRYVIEW(dev_priv)) 5417 PIPE_CONF_CHECK_X(cgm_mode); 5418 else 5419 PIPE_CONF_CHECK_X(csc_mode); 5420 PIPE_CONF_CHECK_BOOL(gamma_enable); 5421 PIPE_CONF_CHECK_BOOL(csc_enable); 5422 PIPE_CONF_CHECK_BOOL(wgc_enable); 5423 5424 PIPE_CONF_CHECK_I(linetime); 5425 PIPE_CONF_CHECK_I(ips_linetime); 5426 5427 PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true); 5428 PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false); 5429 5430 PIPE_CONF_CHECK_CSC(csc); 5431 PIPE_CONF_CHECK_CSC(output_csc); 5432 } 5433 5434 /* 5435 * Panel replay has to be enabled before link training. PSR doesn't have 5436 * this requirement -> check these only if using panel replay 5437 */ 5438 if (current_config->active_planes && 5439 (current_config->has_panel_replay || 5440 pipe_config->has_panel_replay)) { 5441 PIPE_CONF_CHECK_BOOL(has_psr); 5442 PIPE_CONF_CHECK_BOOL(has_sel_update); 5443 PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch); 5444 PIPE_CONF_CHECK_BOOL(enable_psr2_su_region_et); 5445 PIPE_CONF_CHECK_BOOL(has_panel_replay); 5446 } 5447 5448 PIPE_CONF_CHECK_BOOL(double_wide); 5449 5450 if (dev_priv->display.dpll.mgr) 5451 PIPE_CONF_CHECK_P(shared_dpll); 5452 5453 /* FIXME convert everything over to the dpll_mgr */ 5454 if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv)) 5455 PIPE_CONF_CHECK_PLL(dpll_hw_state); 5456 5457 /* FIXME convert MTL+ platforms over to dpll_mgr */ 5458 if (DISPLAY_VER(dev_priv) >= 14) 5459 PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll); 5460 5461 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 5462 PIPE_CONF_CHECK_X(dsi_pll.div); 5463 5464 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) 5465 PIPE_CONF_CHECK_I(pipe_bpp); 5466 5467 if (!fastset || !pipe_config->update_m_n) { 5468 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock); 5469 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock); 5470 } 5471 PIPE_CONF_CHECK_I(port_clock); 5472 5473 PIPE_CONF_CHECK_I(min_voltage_level); 5474 5475 if (current_config->has_psr || pipe_config->has_psr) 5476 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, 5477 ~intel_hdmi_infoframe_enable(DP_SDP_VSC)); 5478 else 5479 PIPE_CONF_CHECK_X(infoframes.enable); 5480 5481 PIPE_CONF_CHECK_X(infoframes.gcp); 5482 PIPE_CONF_CHECK_INFOFRAME(avi); 5483 PIPE_CONF_CHECK_INFOFRAME(spd); 5484 PIPE_CONF_CHECK_INFOFRAME(hdmi); 5485 PIPE_CONF_CHECK_INFOFRAME(drm); 5486 PIPE_CONF_CHECK_DP_VSC_SDP(vsc); 5487 PIPE_CONF_CHECK_DP_AS_SDP(as_sdp); 5488 5489 PIPE_CONF_CHECK_X(sync_mode_slaves_mask); 5490 
PIPE_CONF_CHECK_I(master_transcoder); 5491 PIPE_CONF_CHECK_X(joiner_pipes); 5492 5493 PIPE_CONF_CHECK_BOOL(dsc.config.block_pred_enable); 5494 PIPE_CONF_CHECK_BOOL(dsc.config.convert_rgb); 5495 PIPE_CONF_CHECK_BOOL(dsc.config.simple_422); 5496 PIPE_CONF_CHECK_BOOL(dsc.config.native_422); 5497 PIPE_CONF_CHECK_BOOL(dsc.config.native_420); 5498 PIPE_CONF_CHECK_BOOL(dsc.config.vbr_enable); 5499 PIPE_CONF_CHECK_I(dsc.config.line_buf_depth); 5500 PIPE_CONF_CHECK_I(dsc.config.bits_per_component); 5501 PIPE_CONF_CHECK_I(dsc.config.pic_width); 5502 PIPE_CONF_CHECK_I(dsc.config.pic_height); 5503 PIPE_CONF_CHECK_I(dsc.config.slice_width); 5504 PIPE_CONF_CHECK_I(dsc.config.slice_height); 5505 PIPE_CONF_CHECK_I(dsc.config.initial_dec_delay); 5506 PIPE_CONF_CHECK_I(dsc.config.initial_xmit_delay); 5507 PIPE_CONF_CHECK_I(dsc.config.scale_decrement_interval); 5508 PIPE_CONF_CHECK_I(dsc.config.scale_increment_interval); 5509 PIPE_CONF_CHECK_I(dsc.config.initial_scale_value); 5510 PIPE_CONF_CHECK_I(dsc.config.first_line_bpg_offset); 5511 PIPE_CONF_CHECK_I(dsc.config.flatness_min_qp); 5512 PIPE_CONF_CHECK_I(dsc.config.flatness_max_qp); 5513 PIPE_CONF_CHECK_I(dsc.config.slice_bpg_offset); 5514 PIPE_CONF_CHECK_I(dsc.config.nfl_bpg_offset); 5515 PIPE_CONF_CHECK_I(dsc.config.initial_offset); 5516 PIPE_CONF_CHECK_I(dsc.config.final_offset); 5517 PIPE_CONF_CHECK_I(dsc.config.rc_model_size); 5518 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit0); 5519 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit1); 5520 PIPE_CONF_CHECK_I(dsc.config.slice_chunk_size); 5521 PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset); 5522 PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset); 5523 5524 PIPE_CONF_CHECK_BOOL(dsc.compression_enable); 5525 PIPE_CONF_CHECK_BOOL(dsc.dsc_split); 5526 PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16); 5527 5528 PIPE_CONF_CHECK_BOOL(splitter.enable); 5529 PIPE_CONF_CHECK_I(splitter.link_count); 5530 PIPE_CONF_CHECK_I(splitter.pixel_overlap); 5531 5532 if (!fastset) { 5533 PIPE_CONF_CHECK_BOOL(vrr.enable); 5534 PIPE_CONF_CHECK_I(vrr.vmin); 5535 PIPE_CONF_CHECK_I(vrr.vmax); 5536 PIPE_CONF_CHECK_I(vrr.flipline); 5537 PIPE_CONF_CHECK_I(vrr.pipeline_full); 5538 PIPE_CONF_CHECK_I(vrr.guardband); 5539 PIPE_CONF_CHECK_I(vrr.vsync_start); 5540 PIPE_CONF_CHECK_I(vrr.vsync_end); 5541 PIPE_CONF_CHECK_LLI(cmrr.cmrr_m); 5542 PIPE_CONF_CHECK_LLI(cmrr.cmrr_n); 5543 PIPE_CONF_CHECK_BOOL(cmrr.enable); 5544 } 5545 5546 #undef PIPE_CONF_CHECK_X 5547 #undef PIPE_CONF_CHECK_I 5548 #undef PIPE_CONF_CHECK_LLI 5549 #undef PIPE_CONF_CHECK_BOOL 5550 #undef PIPE_CONF_CHECK_P 5551 #undef PIPE_CONF_CHECK_FLAGS 5552 #undef PIPE_CONF_CHECK_COLOR_LUT 5553 #undef PIPE_CONF_CHECK_TIMINGS 5554 #undef PIPE_CONF_CHECK_RECT 5555 #undef PIPE_CONF_QUIRK 5556 5557 return ret; 5558 } 5559 5560 static void 5561 intel_verify_planes(struct intel_atomic_state *state) 5562 { 5563 struct intel_plane *plane; 5564 const struct intel_plane_state *plane_state; 5565 int i; 5566 5567 for_each_new_intel_plane_in_state(state, plane, 5568 plane_state, i) 5569 assert_plane(plane, plane_state->planar_slave || 5570 plane_state->uapi.visible); 5571 } 5572 5573 static int intel_modeset_pipe(struct intel_atomic_state *state, 5574 struct intel_crtc_state *crtc_state, 5575 const char *reason) 5576 { 5577 struct drm_i915_private *i915 = to_i915(state->base.dev); 5578 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5579 int ret; 5580 5581 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Full modeset due to %s\n", 5582 crtc->base.base.id, crtc->base.name, reason); 5583 5584 
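	/*
	 * Pull everything that depends on this CRTC into the state:
	 * connectors, DP tunnel state, MST topology state and planes,
	 * before flagging the mode change below.
	 */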
ret = drm_atomic_add_affected_connectors(&state->base, 5585 &crtc->base); 5586 if (ret) 5587 return ret; 5588 5589 ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc); 5590 if (ret) 5591 return ret; 5592 5593 ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc); 5594 if (ret) 5595 return ret; 5596 5597 ret = intel_atomic_add_affected_planes(state, crtc); 5598 if (ret) 5599 return ret; 5600 5601 crtc_state->uapi.mode_changed = true; 5602 5603 return 0; 5604 } 5605 5606 /** 5607 * intel_modeset_pipes_in_mask_early - force a full modeset on a set of pipes 5608 * @state: intel atomic state 5609 * @reason: the reason for the full modeset 5610 * @mask: mask of pipes to modeset 5611 * 5612 * Add the pipes in @mask to @state and force a full modeset on the enabled ones, 5613 * for the reason given in @reason. 5614 * This function can be called only before the new plane states are computed. 5615 * 5616 * Returns 0 in case of success, negative error code otherwise. 5617 */ 5618 int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state, 5619 const char *reason, u8 mask) 5620 { 5621 struct drm_i915_private *i915 = to_i915(state->base.dev); 5622 struct intel_crtc *crtc; 5623 5624 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mask) { 5625 struct intel_crtc_state *crtc_state; 5626 int ret; 5627 5628 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 5629 if (IS_ERR(crtc_state)) 5630 return PTR_ERR(crtc_state); 5631 5632 if (!crtc_state->hw.enable || 5633 intel_crtc_needs_modeset(crtc_state)) 5634 continue; 5635 5636 ret = intel_modeset_pipe(state, crtc_state, reason); 5637 if (ret) 5638 return ret; 5639 } 5640 5641 return 0; 5642 } 5643 5644 static void 5645 intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state) 5646 { 5647 crtc_state->uapi.mode_changed = true; 5648 5649 crtc_state->update_pipe = false; 5650 crtc_state->update_m_n = false; 5651 crtc_state->update_lrr = false; 5652 } 5653 5654 /** 5655 * intel_modeset_all_pipes_late - force a full modeset on all pipes 5656 * @state: intel atomic state 5657 * @reason: the reason for the full modeset 5658 * 5659 * Add all pipes to @state and force a full modeset on the active ones, for 5660 * the reason given in @reason. 5661 * This function can only be called after the new plane states have been computed. 5662 * 5663 * Returns 0 in case of success, negative error code otherwise.
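 *
 * A minimal sketch of a caller (the triggering condition and reason
 * string here are hypothetical, shown only to illustrate the calling
 * convention)::
 *
 *	if (global_clock_config_changed)
 *		ret = intel_modeset_all_pipes_late(state, "CDCLK change");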
5664 */ 5665 int intel_modeset_all_pipes_late(struct intel_atomic_state *state, 5666 const char *reason) 5667 { 5668 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5669 struct intel_crtc *crtc; 5670 5671 for_each_intel_crtc(&dev_priv->drm, crtc) { 5672 struct intel_crtc_state *crtc_state; 5673 int ret; 5674 5675 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 5676 if (IS_ERR(crtc_state)) 5677 return PTR_ERR(crtc_state); 5678 5679 if (!crtc_state->hw.active || 5680 intel_crtc_needs_modeset(crtc_state)) 5681 continue; 5682 5683 ret = intel_modeset_pipe(state, crtc_state, reason); 5684 if (ret) 5685 return ret; 5686 5687 intel_crtc_flag_modeset(crtc_state); 5688 5689 crtc_state->update_planes |= crtc_state->active_planes; 5690 crtc_state->async_flip_planes = 0; 5691 crtc_state->do_async_flip = false; 5692 } 5693 5694 return 0; 5695 } 5696 5697 int intel_modeset_commit_pipes(struct drm_i915_private *i915, 5698 u8 pipe_mask, 5699 struct drm_modeset_acquire_ctx *ctx) 5700 { 5701 struct drm_atomic_state *state; 5702 struct intel_crtc *crtc; 5703 int ret; 5704 5705 state = drm_atomic_state_alloc(&i915->drm); 5706 if (!state) 5707 return -ENOMEM; 5708 5709 state->acquire_ctx = ctx; 5710 to_intel_atomic_state(state)->internal = true; 5711 5712 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) { 5713 struct intel_crtc_state *crtc_state = 5714 intel_atomic_get_crtc_state(state, crtc); 5715 5716 if (IS_ERR(crtc_state)) { 5717 ret = PTR_ERR(crtc_state); 5718 goto out; 5719 } 5720 5721 crtc_state->uapi.connectors_changed = true; 5722 } 5723 5724 ret = drm_atomic_commit(state); 5725 out: 5726 drm_atomic_state_put(state); 5727 5728 return ret; 5729 } 5730 5731 /* 5732 * This implements the workaround described in the "notes" section of the mode 5733 * set sequence documentation. When going from no pipes or single pipe to 5734 * multiple pipes, and planes are enabled after the pipe, we need to wait at 5735 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 5736 */ 5737 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) 5738 { 5739 struct intel_crtc_state *crtc_state; 5740 struct intel_crtc *crtc; 5741 struct intel_crtc_state *first_crtc_state = NULL; 5742 struct intel_crtc_state *other_crtc_state = NULL; 5743 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 5744 int i; 5745 5746 /* look at all crtc's that are going to be enabled during the modeset */ 5747 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5748 if (!crtc_state->hw.active || 5749 !intel_crtc_needs_modeset(crtc_state)) 5750 continue; 5751 5752 if (first_crtc_state) { 5753 other_crtc_state = crtc_state; 5754 break; 5755 } else { 5756 first_crtc_state = crtc_state; 5757 first_pipe = crtc->pipe; 5758 } 5759 } 5760 5761 /* No workaround needed? */ 5762 if (!first_crtc_state) 5763 return 0; 5764 5765 /* w/a possibly needed, check how many crtc's are already enabled. 
*/ 5766 for_each_intel_crtc(state->base.dev, crtc) { 5767 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 5768 if (IS_ERR(crtc_state)) 5769 return PTR_ERR(crtc_state); 5770 5771 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 5772 5773 if (!crtc_state->hw.active || 5774 intel_crtc_needs_modeset(crtc_state)) 5775 continue; 5776 5777 /* 2 or more enabled crtcs means no need for w/a */ 5778 if (enabled_pipe != INVALID_PIPE) 5779 return 0; 5780 5781 enabled_pipe = crtc->pipe; 5782 } 5783 5784 if (enabled_pipe != INVALID_PIPE) 5785 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 5786 else if (other_crtc_state) 5787 other_crtc_state->hsw_workaround_pipe = first_pipe; 5788 5789 return 0; 5790 } 5791 5792 u8 intel_calc_active_pipes(struct intel_atomic_state *state, 5793 u8 active_pipes) 5794 { 5795 const struct intel_crtc_state *crtc_state; 5796 struct intel_crtc *crtc; 5797 int i; 5798 5799 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5800 if (crtc_state->hw.active) 5801 active_pipes |= BIT(crtc->pipe); 5802 else 5803 active_pipes &= ~BIT(crtc->pipe); 5804 } 5805 5806 return active_pipes; 5807 } 5808 5809 static int intel_modeset_checks(struct intel_atomic_state *state) 5810 { 5811 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5812 5813 state->modeset = true; 5814 5815 if (IS_HASWELL(dev_priv)) 5816 return hsw_mode_set_planes_workaround(state); 5817 5818 return 0; 5819 } 5820 5821 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 5822 struct intel_crtc_state *new_crtc_state) 5823 { 5824 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 5825 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 5826 5827 /* only allow LRR when the timings stay within the VRR range */ 5828 if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range) 5829 new_crtc_state->update_lrr = false; 5830 5831 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 5832 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n", 5833 crtc->base.base.id, crtc->base.name); 5834 else 5835 new_crtc_state->uapi.mode_changed = false; 5836 5837 if (intel_compare_link_m_n(&old_crtc_state->dp_m_n, 5838 &new_crtc_state->dp_m_n)) 5839 new_crtc_state->update_m_n = false; 5840 5841 if ((old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal && 5842 old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end)) 5843 new_crtc_state->update_lrr = false; 5844 5845 if (intel_crtc_needs_modeset(new_crtc_state)) 5846 intel_crtc_flag_modeset(new_crtc_state); 5847 else 5848 new_crtc_state->update_pipe = true; 5849 } 5850 5851 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, 5852 struct intel_crtc *crtc, 5853 u8 plane_ids_mask) 5854 { 5855 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5856 struct intel_plane *plane; 5857 5858 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 5859 struct intel_plane_state *plane_state; 5860 5861 if ((plane_ids_mask & BIT(plane->id)) == 0) 5862 continue; 5863 5864 plane_state = intel_atomic_get_plane_state(state, plane); 5865 if (IS_ERR(plane_state)) 5866 return PTR_ERR(plane_state); 5867 } 5868 5869 return 0; 5870 } 5871 5872 int intel_atomic_add_affected_planes(struct intel_atomic_state *state, 5873 struct intel_crtc *crtc) 5874 { 5875 const struct intel_crtc_state *old_crtc_state = 5876 intel_atomic_get_old_crtc_state(state, 
crtc); 5877 const struct intel_crtc_state *new_crtc_state = 5878 intel_atomic_get_new_crtc_state(state, crtc); 5879 5880 return intel_crtc_add_planes_to_state(state, crtc, 5881 old_crtc_state->enabled_planes | 5882 new_crtc_state->enabled_planes); 5883 } 5884 5885 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) 5886 { 5887 /* See {hsw,vlv,ivb}_plane_ratio() */ 5888 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || 5889 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || 5890 IS_IVYBRIDGE(dev_priv); 5891 } 5892 5893 static int intel_crtc_add_joiner_planes(struct intel_atomic_state *state, 5894 struct intel_crtc *crtc, 5895 struct intel_crtc *other) 5896 { 5897 const struct intel_plane_state __maybe_unused *plane_state; 5898 struct intel_plane *plane; 5899 u8 plane_ids = 0; 5900 int i; 5901 5902 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 5903 if (plane->pipe == crtc->pipe) 5904 plane_ids |= BIT(plane->id); 5905 } 5906 5907 return intel_crtc_add_planes_to_state(state, other, plane_ids); 5908 } 5909 5910 static int intel_joiner_add_affected_planes(struct intel_atomic_state *state) 5911 { 5912 struct drm_i915_private *i915 = to_i915(state->base.dev); 5913 const struct intel_crtc_state *crtc_state; 5914 struct intel_crtc *crtc; 5915 int i; 5916 5917 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5918 struct intel_crtc *other; 5919 5920 for_each_intel_crtc_in_pipe_mask(&i915->drm, other, 5921 crtc_state->joiner_pipes) { 5922 int ret; 5923 5924 if (crtc == other) 5925 continue; 5926 5927 ret = intel_crtc_add_joiner_planes(state, crtc, other); 5928 if (ret) 5929 return ret; 5930 } 5931 } 5932 5933 return 0; 5934 } 5935 5936 static int intel_atomic_check_planes(struct intel_atomic_state *state) 5937 { 5938 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5939 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 5940 struct intel_plane_state __maybe_unused *plane_state; 5941 struct intel_plane *plane; 5942 struct intel_crtc *crtc; 5943 int i, ret; 5944 5945 ret = icl_add_linked_planes(state); 5946 if (ret) 5947 return ret; 5948 5949 ret = intel_joiner_add_affected_planes(state); 5950 if (ret) 5951 return ret; 5952 5953 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 5954 ret = intel_plane_atomic_check(state, plane); 5955 if (ret) { 5956 drm_dbg_atomic(&dev_priv->drm, 5957 "[PLANE:%d:%s] atomic driver check failed\n", 5958 plane->base.base.id, plane->base.name); 5959 return ret; 5960 } 5961 } 5962 5963 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 5964 new_crtc_state, i) { 5965 u8 old_active_planes, new_active_planes; 5966 5967 ret = icl_check_nv12_planes(state, crtc); 5968 if (ret) 5969 return ret; 5970 5971 /* 5972 * On some platforms the number of active planes affects 5973 * the planes' minimum cdclk calculation. Add such planes 5974 * to the state before we compute the minimum cdclk. 
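 * (On HSW/BDW, VLV/CHV and IVB the per-plane cdclk ratio depends on
 * how many planes are enabled; see the {hsw,vlv,ivb}_plane_ratio()
 * helpers referenced in active_planes_affects_min_cdclk() above.)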
5975 */ 5976 if (!active_planes_affects_min_cdclk(dev_priv)) 5977 continue; 5978 5979 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 5980 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 5981 5982 if (hweight8(old_active_planes) == hweight8(new_active_planes)) 5983 continue; 5984 5985 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); 5986 if (ret) 5987 return ret; 5988 } 5989 5990 return 0; 5991 } 5992 5993 static int intel_atomic_check_crtcs(struct intel_atomic_state *state) 5994 { 5995 struct intel_crtc_state __maybe_unused *crtc_state; 5996 struct intel_crtc *crtc; 5997 int i; 5998 5999 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6000 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 6001 int ret; 6002 6003 ret = intel_crtc_atomic_check(state, crtc); 6004 if (ret) { 6005 drm_dbg_atomic(&i915->drm, 6006 "[CRTC:%d:%s] atomic driver check failed\n", 6007 crtc->base.base.id, crtc->base.name); 6008 return ret; 6009 } 6010 } 6011 6012 return 0; 6013 } 6014 6015 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, 6016 u8 transcoders) 6017 { 6018 const struct intel_crtc_state *new_crtc_state; 6019 struct intel_crtc *crtc; 6020 int i; 6021 6022 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6023 if (new_crtc_state->hw.enable && 6024 transcoders & BIT(new_crtc_state->cpu_transcoder) && 6025 intel_crtc_needs_modeset(new_crtc_state)) 6026 return true; 6027 } 6028 6029 return false; 6030 } 6031 6032 static bool intel_pipes_need_modeset(struct intel_atomic_state *state, 6033 u8 pipes) 6034 { 6035 const struct intel_crtc_state *new_crtc_state; 6036 struct intel_crtc *crtc; 6037 int i; 6038 6039 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6040 if (new_crtc_state->hw.enable && 6041 pipes & BIT(crtc->pipe) && 6042 intel_crtc_needs_modeset(new_crtc_state)) 6043 return true; 6044 } 6045 6046 return false; 6047 } 6048 6049 static int intel_atomic_check_joiner(struct intel_atomic_state *state, 6050 struct intel_crtc *primary_crtc) 6051 { 6052 struct drm_i915_private *i915 = to_i915(state->base.dev); 6053 struct intel_crtc_state *primary_crtc_state = 6054 intel_atomic_get_new_crtc_state(state, primary_crtc); 6055 struct intel_crtc *secondary_crtc; 6056 6057 if (!primary_crtc_state->joiner_pipes) 6058 return 0; 6059 6060 /* sanity check */ 6061 if (drm_WARN_ON(&i915->drm, 6062 primary_crtc->pipe != joiner_primary_pipe(primary_crtc_state))) 6063 return -EINVAL; 6064 6065 if (primary_crtc_state->joiner_pipes & ~joiner_pipes(i915)) { 6066 drm_dbg_kms(&i915->drm, 6067 "[CRTC:%d:%s] Cannot act as joiner primary " 6068 "(need 0x%x as pipes, only 0x%x possible)\n", 6069 primary_crtc->base.base.id, primary_crtc->base.name, 6070 primary_crtc_state->joiner_pipes, joiner_pipes(i915)); 6071 return -EINVAL; 6072 } 6073 6074 for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc, 6075 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) { 6076 struct intel_crtc_state *secondary_crtc_state; 6077 int ret; 6078 6079 secondary_crtc_state = intel_atomic_get_crtc_state(&state->base, secondary_crtc); 6080 if (IS_ERR(secondary_crtc_state)) 6081 return PTR_ERR(secondary_crtc_state); 6082 6083 /* primary being enabled, secondary was already configured? 
*/ 6084 if (secondary_crtc_state->uapi.enable) { 6085 drm_dbg_kms(&i915->drm, 6086 "[CRTC:%d:%s] secondary is enabled as a normal CRTC, but " 6087 "[CRTC:%d:%s] is claiming this CRTC for joiner.\n", 6088 secondary_crtc->base.base.id, secondary_crtc->base.name, 6089 primary_crtc->base.base.id, primary_crtc->base.name); 6090 return -EINVAL; 6091 } 6092 6093 /* 6094 * The state copy logic assumes the primary crtc gets processed 6095 * before the secondary crtc during the main compute_config loop. 6096 * This works because the crtcs are created in pipe order, 6097 * and the hardware requires primary pipe < secondary pipe as well. 6098 * Should that change we need to rethink the logic. 6099 */ 6100 if (WARN_ON(drm_crtc_index(&primary_crtc->base) > 6101 drm_crtc_index(&secondary_crtc->base))) 6102 return -EINVAL; 6103 6104 drm_dbg_kms(&i915->drm, 6105 "[CRTC:%d:%s] Used as secondary for joiner primary [CRTC:%d:%s]\n", 6106 secondary_crtc->base.base.id, secondary_crtc->base.name, 6107 primary_crtc->base.base.id, primary_crtc->base.name); 6108 6109 secondary_crtc_state->joiner_pipes = 6110 primary_crtc_state->joiner_pipes; 6111 6112 ret = copy_joiner_crtc_state_modeset(state, secondary_crtc); 6113 if (ret) 6114 return ret; 6115 } 6116 6117 return 0; 6118 } 6119 6120 static void kill_joiner_secondaries(struct intel_atomic_state *state, 6121 struct intel_crtc *primary_crtc) 6122 { 6123 struct drm_i915_private *i915 = to_i915(state->base.dev); 6124 struct intel_crtc_state *primary_crtc_state = 6125 intel_atomic_get_new_crtc_state(state, primary_crtc); 6126 struct intel_crtc *secondary_crtc; 6127 6128 for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc, 6129 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) { 6130 struct intel_crtc_state *secondary_crtc_state = 6131 intel_atomic_get_new_crtc_state(state, secondary_crtc); 6132 6133 secondary_crtc_state->joiner_pipes = 0; 6134 6135 intel_crtc_copy_uapi_to_hw_state_modeset(state, secondary_crtc); 6136 } 6137 6138 primary_crtc_state->joiner_pipes = 0; 6139 } 6140 6141 /** 6142 * DOC: asynchronous flip implementation 6143 * 6144 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC 6145 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL. 6146 * Correspondingly, support is currently added for the primary plane only. 6147 * 6148 * An async flip can only change the plane surface address, so anything else 6149 * changing is rejected by the intel_async_flip_check_hw() function. 6150 * Once this check is cleared, the flip done interrupt is enabled using 6151 * the intel_crtc_enable_flip_done() function. 6152 * 6153 * As soon as the surface address register is written, the flip done interrupt is 6154 * generated and the requested events are sent to userspace in the interrupt 6155 * handler itself. The timestamp and sequence sent during the flip done event 6156 * correspond to the last vblank and have no relation to the actual time when 6157 * the flip done event was sent.
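 *
 * A minimal userspace sketch (hypothetical fd and object ids; legacy
 * ioctl path via libdrm, as described above)::
 *
 *	ret = drmModePageFlip(fd, crtc_id, fb_id,
 *			      DRM_MODE_PAGE_FLIP_EVENT |
 *			      DRM_MODE_PAGE_FLIP_ASYNC,
 *			      user_data);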
6158 */ 6159 static int intel_async_flip_check_uapi(struct intel_atomic_state *state, 6160 struct intel_crtc *crtc) 6161 { 6162 struct drm_i915_private *i915 = to_i915(state->base.dev); 6163 const struct intel_crtc_state *new_crtc_state = 6164 intel_atomic_get_new_crtc_state(state, crtc); 6165 const struct intel_plane_state *old_plane_state; 6166 struct intel_plane_state *new_plane_state; 6167 struct intel_plane *plane; 6168 int i; 6169 6170 if (!new_crtc_state->uapi.async_flip) 6171 return 0; 6172 6173 if (!new_crtc_state->uapi.active) { 6174 drm_dbg_kms(&i915->drm, 6175 "[CRTC:%d:%s] not active\n", 6176 crtc->base.base.id, crtc->base.name); 6177 return -EINVAL; 6178 } 6179 6180 if (intel_crtc_needs_modeset(new_crtc_state)) { 6181 drm_dbg_kms(&i915->drm, 6182 "[CRTC:%d:%s] modeset required\n", 6183 crtc->base.base.id, crtc->base.name); 6184 return -EINVAL; 6185 } 6186 6187 /* 6188 * FIXME: joiner+async flip is busted currently. 6189 * Remove this check once the issues are fixed. 6190 */ 6191 if (new_crtc_state->joiner_pipes) { 6192 drm_dbg_kms(&i915->drm, 6193 "[CRTC:%d:%s] async flip disallowed with joiner\n", 6194 crtc->base.base.id, crtc->base.name); 6195 return -EINVAL; 6196 } 6197 6198 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 6199 new_plane_state, i) { 6200 if (plane->pipe != crtc->pipe) 6201 continue; 6202 6203 /* 6204 * TODO: Async flip is only supported through the page flip IOCTL 6205 * as of now, so support is currently added for the primary plane only. 6206 * Support for other planes on the platforms that support 6207 * this (vlv/chv and icl+) should be added when async flip is 6208 * enabled in the atomic IOCTL path. 6209 */ 6210 if (!plane->async_flip) { 6211 drm_dbg_kms(&i915->drm, 6212 "[PLANE:%d:%s] async flip not supported\n", 6213 plane->base.base.id, plane->base.name); 6214 return -EINVAL; 6215 } 6216 6217 if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) { 6218 drm_dbg_kms(&i915->drm, 6219 "[PLANE:%d:%s] no old or new framebuffer\n", 6220 plane->base.base.id, plane->base.name); 6221 return -EINVAL; 6222 } 6223 } 6224 6225 return 0; 6226 } 6227 6228 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc) 6229 { 6230 struct drm_i915_private *i915 = to_i915(state->base.dev); 6231 const struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6232 const struct intel_plane_state *new_plane_state, *old_plane_state; 6233 struct intel_plane *plane; 6234 int i; 6235 6236 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 6237 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6238 6239 if (!new_crtc_state->uapi.async_flip) 6240 return 0; 6241 6242 if (!new_crtc_state->hw.active) { 6243 drm_dbg_kms(&i915->drm, 6244 "[CRTC:%d:%s] not active\n", 6245 crtc->base.base.id, crtc->base.name); 6246 return -EINVAL; 6247 } 6248 6249 if (intel_crtc_needs_modeset(new_crtc_state)) { 6250 drm_dbg_kms(&i915->drm, 6251 "[CRTC:%d:%s] modeset required\n", 6252 crtc->base.base.id, crtc->base.name); 6253 return -EINVAL; 6254 } 6255 6256 if (old_crtc_state->active_planes != new_crtc_state->active_planes) { 6257 drm_dbg_kms(&i915->drm, 6258 "[CRTC:%d:%s] Active planes cannot be changed during async flip\n", 6259 crtc->base.base.id, crtc->base.name); 6260 return -EINVAL; 6261 } 6262 6263 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 6264 new_plane_state, i) { 6265 if (plane->pipe != crtc->pipe) 6266 continue; 6267 6268 /* 6269 * Only async flip capable planes should be in the state 6270 * if we're really about to ask the hardware to perform 6271 * an async flip. We should never get this far otherwise. 6272 */ 6273 if (drm_WARN_ON(&i915->drm, 6274 new_crtc_state->do_async_flip && !plane->async_flip)) 6275 return -EINVAL; 6276 6277 /* 6278 * Only check async flip capable planes; other planes 6279 * may be involved in the initial commit due to 6280 * the wm0/ddb optimization. 6281 * 6282 * TODO maybe should track which planes actually 6283 * were requested to do the async flip... 6284 */ 6285 if (!plane->async_flip) 6286 continue; 6287 6288 /* 6289 * FIXME: This check is kept generic for all platforms. 6290 * Need to verify this for all gen9 platforms to enable 6291 * this selectively if required. 6292 */ 6293 switch (new_plane_state->hw.fb->modifier) { 6294 case DRM_FORMAT_MOD_LINEAR: 6295 /* 6296 * FIXME: Async flip on linear buffers is supported on ICL 6297 * as well, but additional alignment and fbc restrictions 6298 * need to be taken care of. These aren't applicable for 6299 * gen12+. 6300 */ 6301 if (DISPLAY_VER(i915) < 12) { 6302 drm_dbg_kms(&i915->drm, 6303 "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n", 6304 plane->base.base.id, plane->base.name, 6305 new_plane_state->hw.fb->modifier, DISPLAY_VER(i915)); 6306 return -EINVAL; 6307 } 6308 break; 6309 6310 case I915_FORMAT_MOD_X_TILED: 6311 case I915_FORMAT_MOD_Y_TILED: 6312 case I915_FORMAT_MOD_Yf_TILED: 6313 case I915_FORMAT_MOD_4_TILED: 6314 case I915_FORMAT_MOD_4_TILED_BMG_CCS: 6315 case I915_FORMAT_MOD_4_TILED_LNL_CCS: 6316 break; 6317 default: 6318 drm_dbg_kms(&i915->drm, 6319 "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n", 6320 plane->base.base.id, plane->base.name, 6321 new_plane_state->hw.fb->modifier); 6322 return -EINVAL; 6323 } 6324 6325 if (new_plane_state->hw.fb->format->num_planes > 1) { 6326 drm_dbg_kms(&i915->drm, 6327 "[PLANE:%d:%s] Planar formats do not support async flips\n", 6328 plane->base.base.id, plane->base.name); 6329 return -EINVAL; 6330 } 6331 6332 /* 6333 * We turn the first async flip request into a sync flip 6334 * so that we can reconfigure the plane (e.g. change the modifier). 
6335 */ 6336 if (!new_crtc_state->do_async_flip) 6337 continue; 6338 6339 if (old_plane_state->view.color_plane[0].mapping_stride != 6340 new_plane_state->view.color_plane[0].mapping_stride) { 6341 drm_dbg_kms(&i915->drm, 6342 "[PLANE:%d:%s] Stride cannot be changed in async flip\n", 6343 plane->base.base.id, plane->base.name); 6344 return -EINVAL; 6345 } 6346 6347 if (old_plane_state->hw.fb->modifier != 6348 new_plane_state->hw.fb->modifier) { 6349 drm_dbg_kms(&i915->drm, 6350 "[PLANE:%d:%s] Modifier cannot be changed in async flip\n", 6351 plane->base.base.id, plane->base.name); 6352 return -EINVAL; 6353 } 6354 6355 if (old_plane_state->hw.fb->format != 6356 new_plane_state->hw.fb->format) { 6357 drm_dbg_kms(&i915->drm, 6358 "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n", 6359 plane->base.base.id, plane->base.name); 6360 return -EINVAL; 6361 } 6362 6363 if (old_plane_state->hw.rotation != 6364 new_plane_state->hw.rotation) { 6365 drm_dbg_kms(&i915->drm, 6366 "[PLANE:%d:%s] Rotation cannot be changed in async flip\n", 6367 plane->base.base.id, plane->base.name); 6368 return -EINVAL; 6369 } 6370 6371 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) || 6372 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) { 6373 drm_dbg_kms(&i915->drm, 6374 "[PLANE:%d:%s] Size/coordinates cannot be changed in async flip\n", 6375 plane->base.base.id, plane->base.name); 6376 return -EINVAL; 6377 } 6378 6379 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) { 6380 drm_dbg_kms(&i915->drm, 6381 "[PLANE:%d:%s] Alpha value cannot be changed in async flip\n", 6382 plane->base.base.id, plane->base.name); 6383 return -EINVAL; 6384 } 6385 6386 if (old_plane_state->hw.pixel_blend_mode != 6387 new_plane_state->hw.pixel_blend_mode) { 6388 drm_dbg_kms(&i915->drm, 6389 "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n", 6390 plane->base.base.id, plane->base.name); 6391 return -EINVAL; 6392 } 6393 6394 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) { 6395 drm_dbg_kms(&i915->drm, 6396 "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n", 6397 plane->base.base.id, plane->base.name); 6398 return -EINVAL; 6399 } 6400 6401 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) { 6402 drm_dbg_kms(&i915->drm, 6403 "[PLANE:%d:%s] Color range cannot be changed in async flip\n", 6404 plane->base.base.id, plane->base.name); 6405 return -EINVAL; 6406 } 6407 6408 /* plane decryption is allowed to change only in synchronous flips */ 6409 if (old_plane_state->decrypt != new_plane_state->decrypt) { 6410 drm_dbg_kms(&i915->drm, 6411 "[PLANE:%d:%s] Decryption cannot be changed in async flip\n", 6412 plane->base.base.id, plane->base.name); 6413 return -EINVAL; 6414 } 6415 } 6416 6417 return 0; 6418 } 6419 6420 static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state) 6421 { 6422 struct drm_i915_private *i915 = to_i915(state->base.dev); 6423 struct intel_crtc_state *crtc_state; 6424 struct intel_crtc *crtc; 6425 u8 affected_pipes = 0; 6426 u8 modeset_pipes = 0; 6427 int i; 6428 6429 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6430 affected_pipes |= crtc_state->joiner_pipes; 6431 if (intel_crtc_needs_modeset(crtc_state)) 6432 modeset_pipes |= crtc_state->joiner_pipes; 6433 } 6434 6435 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) { 6436 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 6437 if (IS_ERR(crtc_state)) 6438 
return PTR_ERR(crtc_state); 6439 } 6440 6441 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) { 6442 int ret; 6443 6444 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6445 6446 crtc_state->uapi.mode_changed = true; 6447 6448 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 6449 if (ret) 6450 return ret; 6451 6452 ret = intel_atomic_add_affected_planes(state, crtc); 6453 if (ret) 6454 return ret; 6455 } 6456 6457 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6458 /* Kill old joiner link, we may re-establish afterwards */ 6459 if (intel_crtc_needs_modeset(crtc_state) && 6460 intel_crtc_is_joiner_primary(crtc_state)) 6461 kill_joiner_secondaries(state, crtc); 6462 } 6463 6464 return 0; 6465 } 6466 6467 static int intel_atomic_check_config(struct intel_atomic_state *state, 6468 struct intel_link_bw_limits *limits, 6469 enum pipe *failed_pipe) 6470 { 6471 struct drm_i915_private *i915 = to_i915(state->base.dev); 6472 struct intel_crtc_state *new_crtc_state; 6473 struct intel_crtc *crtc; 6474 int ret; 6475 int i; 6476 6477 *failed_pipe = INVALID_PIPE; 6478 6479 ret = intel_joiner_add_affected_crtcs(state); 6480 if (ret) 6481 return ret; 6482 6483 ret = intel_fdi_add_affected_crtcs(state); 6484 if (ret) 6485 return ret; 6486 6487 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6488 if (!intel_crtc_needs_modeset(new_crtc_state)) { 6489 if (intel_crtc_is_joiner_secondary(new_crtc_state)) 6490 copy_joiner_crtc_state_nomodeset(state, crtc); 6491 else 6492 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 6493 continue; 6494 } 6495 6496 if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state))) 6497 continue; 6498 6499 ret = intel_crtc_prepare_cleared_state(state, crtc); 6500 if (ret) 6501 goto fail; 6502 6503 if (!new_crtc_state->hw.enable) 6504 continue; 6505 6506 ret = intel_modeset_pipe_config(state, crtc, limits); 6507 if (ret) 6508 goto fail; 6509 } 6510 6511 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6512 if (!intel_crtc_needs_modeset(new_crtc_state)) 6513 continue; 6514 6515 if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state))) 6516 continue; 6517 6518 if (!new_crtc_state->hw.enable) 6519 continue; 6520 6521 ret = intel_modeset_pipe_config_late(state, crtc); 6522 if (ret) 6523 goto fail; 6524 } 6525 6526 fail: 6527 if (ret) 6528 *failed_pipe = crtc->pipe; 6529 6530 return ret; 6531 } 6532 6533 static int intel_atomic_check_config_and_link(struct intel_atomic_state *state) 6534 { 6535 struct intel_link_bw_limits new_limits; 6536 struct intel_link_bw_limits old_limits; 6537 int ret; 6538 6539 intel_link_bw_init_limits(state, &new_limits); 6540 old_limits = new_limits; 6541 6542 while (true) { 6543 enum pipe failed_pipe; 6544 6545 ret = intel_atomic_check_config(state, &new_limits, 6546 &failed_pipe); 6547 if (ret) { 6548 /* 6549 * If the bpp limit for a pipe is below the minimum it supports, set the 6550 * limit to the minimum and recalculate the config. 
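 * (I.e. if computing the config with the current per-pipe bpp limits
 * failed with -EINVAL, intel_link_bw_set_bpp_limit_for_pipe() adjusts
 * the limit for the failed pipe and the loop retries the whole
 * configuration from scratch.)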
6551 */ 6552 if (ret == -EINVAL && 6553 intel_link_bw_set_bpp_limit_for_pipe(state, 6554 &old_limits, 6555 &new_limits, 6556 failed_pipe)) 6557 continue; 6558 6559 break; 6560 } 6561 6562 old_limits = new_limits; 6563 6564 ret = intel_link_bw_atomic_check(state, &new_limits); 6565 if (ret != -EAGAIN) 6566 break; 6567 } 6568 6569 return ret; 6570 } 6571 /** 6572 * intel_atomic_check - validate state object 6573 * @dev: drm device 6574 * @_state: state to validate 6575 */ 6576 int intel_atomic_check(struct drm_device *dev, 6577 struct drm_atomic_state *_state) 6578 { 6579 struct drm_i915_private *dev_priv = to_i915(dev); 6580 struct intel_atomic_state *state = to_intel_atomic_state(_state); 6581 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6582 struct intel_crtc *crtc; 6583 int ret, i; 6584 bool any_ms = false; 6585 6586 if (!intel_display_driver_check_access(dev_priv)) 6587 return -ENODEV; 6588 6589 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6590 new_crtc_state, i) { 6591 /* 6592 * A crtc's state is no longer considered to be inherited 6593 * after the first userspace/client initiated commit. 6594 */ 6595 if (!state->internal) 6596 new_crtc_state->inherited = false; 6597 6598 if (new_crtc_state->inherited != old_crtc_state->inherited) 6599 new_crtc_state->uapi.mode_changed = true; 6600 6601 if (new_crtc_state->uapi.scaling_filter != 6602 old_crtc_state->uapi.scaling_filter) 6603 new_crtc_state->uapi.mode_changed = true; 6604 } 6605 6606 intel_vrr_check_modeset(state); 6607 6608 ret = drm_atomic_helper_check_modeset(dev, &state->base); 6609 if (ret) 6610 goto fail; 6611 6612 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6613 ret = intel_async_flip_check_uapi(state, crtc); 6614 if (ret) 6615 return ret; 6616 } 6617 6618 ret = intel_atomic_check_config_and_link(state); 6619 if (ret) 6620 goto fail; 6621 6622 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6623 if (!intel_crtc_needs_modeset(new_crtc_state)) 6624 continue; 6625 6626 if (intel_crtc_is_joiner_secondary(new_crtc_state)) { 6627 drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable); 6628 continue; 6629 } 6630 6631 ret = intel_atomic_check_joiner(state, crtc); 6632 if (ret) 6633 goto fail; 6634 } 6635 6636 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6637 new_crtc_state, i) { 6638 if (!intel_crtc_needs_modeset(new_crtc_state)) 6639 continue; 6640 6641 intel_joiner_adjust_pipe_src(new_crtc_state); 6642 6643 intel_crtc_check_fastset(old_crtc_state, new_crtc_state); 6644 } 6645 6646 /* 6647 * Check if fastset is allowed by external dependencies like other 6648 * pipes and transcoders. 6649 * 6650 * Right now it only forces a full modeset when the MST master 6651 * transcoder did not change but the pipe of the master transcoder 6652 * needs a full modeset, in which case all slaves also need a full 6653 * modeset; similarly, in case of port synced crtcs, if one of the 6654 * synced crtcs needs a full modeset, all the other synced crtcs 6655 * should be forced to do a full modeset as well. 
6656 */ 6657 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6658 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state)) 6659 continue; 6660 6661 if (intel_dp_mst_crtc_needs_modeset(state, crtc)) 6662 intel_crtc_flag_modeset(new_crtc_state); 6663 6664 if (intel_dp_mst_is_slave_trans(new_crtc_state)) { 6665 enum transcoder master = new_crtc_state->mst_master_transcoder; 6666 6667 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) 6668 intel_crtc_flag_modeset(new_crtc_state); 6669 } 6670 6671 if (is_trans_port_sync_mode(new_crtc_state)) { 6672 u8 trans = new_crtc_state->sync_mode_slaves_mask; 6673 6674 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER) 6675 trans |= BIT(new_crtc_state->master_transcoder); 6676 6677 if (intel_cpu_transcoders_need_modeset(state, trans)) 6678 intel_crtc_flag_modeset(new_crtc_state); 6679 } 6680 6681 if (new_crtc_state->joiner_pipes) { 6682 if (intel_pipes_need_modeset(state, new_crtc_state->joiner_pipes)) 6683 intel_crtc_flag_modeset(new_crtc_state); 6684 } 6685 } 6686 6687 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6688 new_crtc_state, i) { 6689 if (!intel_crtc_needs_modeset(new_crtc_state)) 6690 continue; 6691 6692 any_ms = true; 6693 6694 intel_release_shared_dplls(state, crtc); 6695 } 6696 6697 if (any_ms && !check_digital_port_conflicts(state)) { 6698 drm_dbg_kms(&dev_priv->drm, 6699 "rejecting conflicting digital port configuration\n"); 6700 ret = -EINVAL; 6701 goto fail; 6702 } 6703 6704 ret = intel_atomic_check_planes(state); 6705 if (ret) 6706 goto fail; 6707 6708 ret = intel_compute_global_watermarks(state); 6709 if (ret) 6710 goto fail; 6711 6712 ret = intel_bw_atomic_check(state); 6713 if (ret) 6714 goto fail; 6715 6716 ret = intel_cdclk_atomic_check(state, &any_ms); 6717 if (ret) 6718 goto fail; 6719 6720 if (intel_any_crtc_needs_modeset(state)) 6721 any_ms = true; 6722 6723 if (any_ms) { 6724 ret = intel_modeset_checks(state); 6725 if (ret) 6726 goto fail; 6727 6728 ret = intel_modeset_calc_cdclk(state); 6729 if (ret) 6730 return ret; 6731 } 6732 6733 ret = intel_pmdemand_atomic_check(state); 6734 if (ret) 6735 goto fail; 6736 6737 ret = intel_atomic_check_crtcs(state); 6738 if (ret) 6739 goto fail; 6740 6741 ret = intel_fbc_atomic_check(state); 6742 if (ret) 6743 goto fail; 6744 6745 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6746 new_crtc_state, i) { 6747 intel_color_assert_luts(new_crtc_state); 6748 6749 ret = intel_async_flip_check_hw(state, crtc); 6750 if (ret) 6751 goto fail; 6752 6753 /* Either full modeset or fastset (or neither), never both */ 6754 drm_WARN_ON(&dev_priv->drm, 6755 intel_crtc_needs_modeset(new_crtc_state) && 6756 intel_crtc_needs_fastset(new_crtc_state)); 6757 6758 if (!intel_crtc_needs_modeset(new_crtc_state) && 6759 !intel_crtc_needs_fastset(new_crtc_state)) 6760 continue; 6761 6762 intel_crtc_state_dump(new_crtc_state, state, 6763 intel_crtc_needs_modeset(new_crtc_state) ? 6764 "modeset" : "fastset"); 6765 } 6766 6767 return 0; 6768 6769 fail: 6770 if (ret == -EDEADLK) 6771 return ret; 6772 6773 /* 6774 * FIXME would probably be nice to know which crtc specifically 6775 * caused the failure, in cases where we can pinpoint it. 
6776 */ 6777 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6778 new_crtc_state, i) 6779 intel_crtc_state_dump(new_crtc_state, state, "failed"); 6780 6781 return ret; 6782 } 6783 6784 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 6785 { 6786 struct intel_crtc_state __maybe_unused *crtc_state; 6787 struct intel_crtc *crtc; 6788 int i, ret; 6789 6790 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base); 6791 if (ret < 0) 6792 return ret; 6793 6794 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) 6795 intel_color_prepare_commit(state, crtc); 6796 6797 return 0; 6798 } 6799 6800 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 6801 struct intel_crtc_state *crtc_state) 6802 { 6803 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6804 6805 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes) 6806 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 6807 6808 if (crtc_state->has_pch_encoder) { 6809 enum pipe pch_transcoder = 6810 intel_crtc_pch_transcoder(crtc); 6811 6812 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 6813 } 6814 } 6815 6816 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, 6817 const struct intel_crtc_state *new_crtc_state) 6818 { 6819 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 6820 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6821 6822 /* 6823 * Update pipe size and adjust fitter if needed: the reason for this is 6824 * that in compute_mode_changes we check the native mode (not the pfit 6825 * mode) to see if we can flip rather than do a full mode set. In the 6826 * fastboot case, we'll flip, but if we don't update the pipesrc and 6827 * pfit state, we'll end up with a big fb scanned out into the wrong 6828 * sized surface. 6829 */ 6830 intel_set_pipe_src_size(new_crtc_state); 6831 6832 /* on skylake this is done by detaching scalers */ 6833 if (DISPLAY_VER(dev_priv) >= 9) { 6834 if (new_crtc_state->pch_pfit.enabled) 6835 skl_pfit_enable(new_crtc_state); 6836 } else if (HAS_PCH_SPLIT(dev_priv)) { 6837 if (new_crtc_state->pch_pfit.enabled) 6838 ilk_pfit_enable(new_crtc_state); 6839 else if (old_crtc_state->pch_pfit.enabled) 6840 ilk_pfit_disable(old_crtc_state); 6841 } 6842 6843 /* 6844 * The register is supposedly single buffered so perhaps 6845 * not 100% correct to do this here. But SKL+ calculates 6846 * this based on the adjusted pixel rate so pfit changes do 6847 * affect it and so it must be updated for fastsets. 6848 * HSW/BDW only really need this here for fastboot, after 6849 * that the value should not change without a full modeset. 
6850 */ 6851 if (DISPLAY_VER(dev_priv) >= 9 || 6852 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 6853 hsw_set_linetime_wm(new_crtc_state); 6854 6855 if (new_crtc_state->update_m_n) 6856 intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder, 6857 &new_crtc_state->dp_m_n); 6858 6859 if (new_crtc_state->update_lrr) 6860 intel_set_transcoder_timings_lrr(new_crtc_state); 6861 } 6862 6863 static void commit_pipe_pre_planes(struct intel_atomic_state *state, 6864 struct intel_crtc *crtc) 6865 { 6866 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6867 const struct intel_crtc_state *old_crtc_state = 6868 intel_atomic_get_old_crtc_state(state, crtc); 6869 const struct intel_crtc_state *new_crtc_state = 6870 intel_atomic_get_new_crtc_state(state, crtc); 6871 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 6872 6873 /* 6874 * During modesets pipe configuration was programmed as the 6875 * CRTC was enabled. 6876 */ 6877 if (!modeset) { 6878 if (intel_crtc_needs_color_update(new_crtc_state)) 6879 intel_color_commit_arm(new_crtc_state); 6880 6881 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 6882 bdw_set_pipe_misc(new_crtc_state); 6883 6884 if (intel_crtc_needs_fastset(new_crtc_state)) 6885 intel_pipe_fastset(old_crtc_state, new_crtc_state); 6886 } 6887 6888 intel_psr2_program_trans_man_trk_ctl(new_crtc_state); 6889 6890 intel_atomic_update_watermarks(state, crtc); 6891 } 6892 6893 static void commit_pipe_post_planes(struct intel_atomic_state *state, 6894 struct intel_crtc *crtc) 6895 { 6896 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6897 const struct intel_crtc_state *new_crtc_state = 6898 intel_atomic_get_new_crtc_state(state, crtc); 6899 6900 /* 6901 * Disable the scaler(s) after the plane(s) so that we don't 6902 * get a catastrophic underrun even if the two operations 6903 * end up happening in two different frames. 6904 */ 6905 if (DISPLAY_VER(dev_priv) >= 9 && 6906 !intel_crtc_needs_modeset(new_crtc_state)) 6907 skl_detach_scalers(new_crtc_state); 6908 6909 if (intel_crtc_vrr_enabling(state, crtc)) 6910 intel_vrr_enable(new_crtc_state); 6911 } 6912 6913 static void intel_enable_crtc(struct intel_atomic_state *state, 6914 struct intel_crtc *crtc) 6915 { 6916 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6917 const struct intel_crtc_state *new_crtc_state = 6918 intel_atomic_get_new_crtc_state(state, crtc); 6919 struct intel_crtc *pipe_crtc; 6920 6921 if (!intel_crtc_needs_modeset(new_crtc_state)) 6922 return; 6923 6924 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, 6925 intel_crtc_joined_pipe_mask(new_crtc_state)) { 6926 const struct intel_crtc_state *pipe_crtc_state = 6927 intel_atomic_get_new_crtc_state(state, pipe_crtc); 6928 6929 /* VRR will be enabled later, if required */ 6930 intel_crtc_update_active_timings(pipe_crtc_state, false); 6931 } 6932 6933 dev_priv->display.funcs.display->crtc_enable(state, crtc); 6934 6935 /* vblanks work again, re-enable pipe CRC. 
*/ 6936 intel_crtc_enable_pipe_crc(crtc); 6937 } 6938 6939 static void intel_pre_update_crtc(struct intel_atomic_state *state, 6940 struct intel_crtc *crtc) 6941 { 6942 struct drm_i915_private *i915 = to_i915(state->base.dev); 6943 const struct intel_crtc_state *old_crtc_state = 6944 intel_atomic_get_old_crtc_state(state, crtc); 6945 struct intel_crtc_state *new_crtc_state = 6946 intel_atomic_get_new_crtc_state(state, crtc); 6947 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 6948 6949 if (old_crtc_state->inherited || 6950 intel_crtc_needs_modeset(new_crtc_state)) { 6951 if (HAS_DPT(i915)) 6952 intel_dpt_configure(crtc); 6953 } 6954 6955 if (!modeset) { 6956 if (new_crtc_state->preload_luts && 6957 intel_crtc_needs_color_update(new_crtc_state)) 6958 intel_color_load_luts(new_crtc_state); 6959 6960 intel_pre_plane_update(state, crtc); 6961 6962 if (intel_crtc_needs_fastset(new_crtc_state)) 6963 intel_encoders_update_pipe(state, crtc); 6964 6965 if (DISPLAY_VER(i915) >= 11 && 6966 intel_crtc_needs_fastset(new_crtc_state)) 6967 icl_set_pipe_chicken(new_crtc_state); 6968 6969 if (vrr_params_changed(old_crtc_state, new_crtc_state) || 6970 cmrr_params_changed(old_crtc_state, new_crtc_state)) 6971 intel_vrr_set_transcoder_timings(new_crtc_state); 6972 } 6973 6974 intel_fbc_update(state, crtc); 6975 6976 drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF)); 6977 6978 if (!modeset && 6979 intel_crtc_needs_color_update(new_crtc_state)) 6980 intel_color_commit_noarm(new_crtc_state); 6981 6982 intel_crtc_planes_update_noarm(state, crtc); 6983 } 6984 6985 static void intel_update_crtc(struct intel_atomic_state *state, 6986 struct intel_crtc *crtc) 6987 { 6988 const struct intel_crtc_state *old_crtc_state = 6989 intel_atomic_get_old_crtc_state(state, crtc); 6990 struct intel_crtc_state *new_crtc_state = 6991 intel_atomic_get_new_crtc_state(state, crtc); 6992 6993 /* Perform vblank evasion around commit operation */ 6994 intel_pipe_update_start(state, crtc); 6995 6996 commit_pipe_pre_planes(state, crtc); 6997 6998 intel_crtc_planes_update_arm(state, crtc); 6999 7000 commit_pipe_post_planes(state, crtc); 7001 7002 intel_pipe_update_end(state, crtc); 7003 7004 /* 7005 * VRR/Seamless M/N update may need to update frame timings. 7006 * 7007 * FIXME Should be synchronized with the start of vblank somehow... 7008 */ 7009 if (intel_crtc_vrr_enabling(state, crtc) || 7010 new_crtc_state->update_m_n || new_crtc_state->update_lrr) 7011 intel_crtc_update_active_timings(new_crtc_state, 7012 new_crtc_state->vrr.enable); 7013 7014 /* 7015 * We usually enable FIFO underrun interrupts as part of the 7016 * CRTC enable sequence during modesets. But when we inherit a 7017 * valid pipe configuration from the BIOS we need to take care 7018 * of enabling them on the CRTC's first fastset. 7019 */ 7020 if (intel_crtc_needs_fastset(new_crtc_state) && 7021 old_crtc_state->inherited) 7022 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 7023 } 7024 7025 static void intel_old_crtc_state_disables(struct intel_atomic_state *state, 7026 struct intel_crtc *crtc) 7027 { 7028 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7029 const struct intel_crtc_state *old_crtc_state = 7030 intel_atomic_get_old_crtc_state(state, crtc); 7031 struct intel_crtc *pipe_crtc; 7032 7033 /* 7034 * We need to disable pipe CRC before disabling the pipe, 7035 * or we race against vblank off. 
7036 */ 7037 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, 7038 intel_crtc_joined_pipe_mask(old_crtc_state)) 7039 intel_crtc_disable_pipe_crc(pipe_crtc); 7040 7041 dev_priv->display.funcs.display->crtc_disable(state, crtc); 7042 7043 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, 7044 intel_crtc_joined_pipe_mask(old_crtc_state)) { 7045 const struct intel_crtc_state *new_pipe_crtc_state = 7046 intel_atomic_get_new_crtc_state(state, pipe_crtc); 7047 7048 pipe_crtc->active = false; 7049 intel_fbc_disable(pipe_crtc); 7050 7051 if (!new_pipe_crtc_state->hw.active) 7052 intel_initial_watermarks(state, pipe_crtc); 7053 } 7054 } 7055 7056 static void intel_commit_modeset_disables(struct intel_atomic_state *state) 7057 { 7058 struct drm_i915_private *i915 = to_i915(state->base.dev); 7059 const struct intel_crtc_state *new_crtc_state, *old_crtc_state; 7060 struct intel_crtc *crtc; 7061 u8 disable_pipes = 0; 7062 int i; 7063 7064 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7065 new_crtc_state, i) { 7066 if (!intel_crtc_needs_modeset(new_crtc_state)) 7067 continue; 7068 7069 /* 7070 * Needs to be done even for pipes 7071 * that weren't enabled previously. 7072 */ 7073 intel_pre_plane_update(state, crtc); 7074 7075 if (!old_crtc_state->hw.active) 7076 continue; 7077 7078 disable_pipes |= BIT(crtc->pipe); 7079 } 7080 7081 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) { 7082 if ((disable_pipes & BIT(crtc->pipe)) == 0) 7083 continue; 7084 7085 intel_crtc_disable_planes(state, crtc); 7086 7087 drm_vblank_work_flush_all(&crtc->base); 7088 } 7089 7090 /* Only disable port sync and MST slaves */ 7091 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) { 7092 if ((disable_pipes & BIT(crtc->pipe)) == 0) 7093 continue; 7094 7095 if (intel_crtc_is_joiner_secondary(old_crtc_state)) 7096 continue; 7097 7098 /* In case of Transcoder Port Sync, master/slave CRTCs can be 7099 * assigned in any order, and we need to make sure that 7100 * slave CRTCs are disabled first and then the master CRTC, since 7101 * slave vblanks are masked until master vblanks. 
7102 */ 7103 if (!is_trans_port_sync_slave(old_crtc_state) && 7104 !intel_dp_mst_is_slave_trans(old_crtc_state)) 7105 continue; 7106 7107 intel_old_crtc_state_disables(state, crtc); 7108 7109 disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state); 7110 } 7111 7112 /* Disable everything else left on */ 7113 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) { 7114 if ((disable_pipes & BIT(crtc->pipe)) == 0) 7115 continue; 7116 7117 if (intel_crtc_is_joiner_secondary(old_crtc_state)) 7118 continue; 7119 7120 intel_old_crtc_state_disables(state, crtc); 7121 7122 disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state); 7123 } 7124 7125 drm_WARN_ON(&i915->drm, disable_pipes); 7126 } 7127 7128 static void intel_commit_modeset_enables(struct intel_atomic_state *state) 7129 { 7130 struct intel_crtc_state *new_crtc_state; 7131 struct intel_crtc *crtc; 7132 int i; 7133 7134 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7135 if (!new_crtc_state->hw.active) 7136 continue; 7137 7138 intel_enable_crtc(state, crtc); 7139 intel_pre_update_crtc(state, crtc); 7140 } 7141 7142 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7143 if (!new_crtc_state->hw.active) 7144 continue; 7145 7146 intel_update_crtc(state, crtc); 7147 } 7148 } 7149 7150 static void skl_commit_modeset_enables(struct intel_atomic_state *state) 7151 { 7152 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7153 struct intel_crtc *crtc; 7154 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 7155 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 7156 u8 update_pipes = 0, modeset_pipes = 0; 7157 int i; 7158 7159 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 7160 enum pipe pipe = crtc->pipe; 7161 7162 if (!new_crtc_state->hw.active) 7163 continue; 7164 7165 /* ignore allocations for crtc's that have been turned off. */ 7166 if (!intel_crtc_needs_modeset(new_crtc_state)) { 7167 entries[pipe] = old_crtc_state->wm.skl.ddb; 7168 update_pipes |= BIT(pipe); 7169 } else { 7170 modeset_pipes |= BIT(pipe); 7171 } 7172 } 7173 7174 /* 7175 * Whenever the number of active pipes changes, we need to make sure we 7176 * update the pipes in the right order so that their ddb allocations 7177 * never overlap with each other between CRTC updates. Otherwise we'll 7178 * cause pipe underruns and other bad stuff. 7179 * 7180 * So first let's enable all pipes that do not need a full modeset as 7181 * those don't have any external dependency. 7182 */ 7183 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7184 enum pipe pipe = crtc->pipe; 7185 7186 if ((update_pipes & BIT(pipe)) == 0) 7187 continue; 7188 7189 intel_pre_update_crtc(state, crtc); 7190 } 7191 7192 intel_dbuf_mbus_pre_ddb_update(state); 7193 7194 while (update_pipes) { 7195 /* 7196 * Commit in reverse order to make joiner primary 7197 * send the uapi events after secondaries are done. 
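 *
 * Within each pass a pipe is committed only once its new DDB
 * allocation no longer overlaps the DDB still in use by any
 * not-yet-updated pipe. E.g. (hypothetical layout) if pipe A
 * shrinks from blocks 0-511 to 0-255 while pipe B grows from
 * 512-1023 to 256-1023, pipe A must commit first, and a vblank
 * must pass, before pipe B may take over blocks 256-511.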

static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first let's enable all pipes that do not need a full modeset as
	 * those don't have any external dependency.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		intel_pre_update_crtc(state, crtc);
	}

	intel_dbuf_mbus_pre_ddb_update(state);

	while (update_pipes) {
		/*
		 * Commit in reverse order to make the joiner primary
		 * send the uapi events after the secondaries are done.
		 */
		for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
							    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_crtc_wait_for_next_vblank(crtc);
		}
	}

	intel_dbuf_mbus_post_ddb_update(state);

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that need a modeset and do not depend on other
	 * pipes.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(new_crtc_state))
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state))
			continue;

		modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(new_crtc_state))
			continue;

		modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		intel_pre_update_crtc(state, crtc);
	}

	/*
	 * Commit in reverse order to make the joiner primary
	 * send the uapi events after the secondaries are done.
	 */
	for_each_new_intel_crtc_in_state_reverse(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
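
/*
 * A hypothetical example of the DDB shuffle above: if pipes A and B stay
 * active but A's new allocation grows into blocks still owned by B's old
 * allocation, the first pass can only commit B; once B's shrunken
 * allocation has taken effect (after a vblank wait), the next iteration
 * of the loop finds no overlap for A and commits it too.
 */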

static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int ret, i;

	for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
		if (new_plane_state->fence) {
			ret = dma_fence_wait_timeout(new_plane_state->fence, false,
						     i915_fence_timeout(i915));
			if (ret <= 0)
				break;

			dma_fence_put(new_plane_state->fence);
			new_plane_state->fence = NULL;
		}
	}
}

static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
		intel_color_cleanup_commit(old_crtc_state);

	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
	drm_atomic_helper_commit_cleanup_done(&state->base);
	drm_atomic_state_put(&state->base);
}

static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int cc_plane;
		int ret;

		if (!fb)
			continue;

		cc_plane = intel_fb_rc_ccs_cc_plane(fb);
		if (cc_plane < 0)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requires this value to be located in the fb at
		 * offset 0 of the cc plane: plane #2 on previous generations,
		 * plane #1 for flat CCS):
		 * - 4 x 4 bytes per-channel value
		 *   (in the surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by the GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that the FB obj is pinned
		 * and the caller made sure that the object is synced wrt. the related
		 * color clear value GPU write on it.
		 */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[cc_plane] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/*
		 * The above could only fail if the FB obj has an unexpected
		 * backing store type.
		 */
		drm_WARN_ON(&i915->drm, ret);
	}
}
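
/*
 * In other words, the cc plane read above assumes this byte layout,
 * relative to fb->offsets[cc_plane]:
 *
 *	 0..15	4 x 4 byte per-channel clear value (written by the fb user)
 *	16..23	8 byte native clear color (written by the GPU, and read
 *		into plane_state->ccval above)
 */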

static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	intel_td_flush(dev_priv);

	drm_atomic_helper_wait_for_dependencies(&state->base);
	drm_dp_mst_atomic_wait_for_dependencies(&state->base);
	intel_atomic_global_state_wait_for_dependencies(state);

	/*
	 * During full modesets we write a lot of registers, wait
	 * for PLLs, etc. Doing that while DC states are enabled
	 * is not a good idea.
	 *
	 * During fastsets and other updates we also need to
	 * disable DC states due to the following scenario:
	 * 1. DC5 exit and PSR exit happen
	 * 2. Some or all _noarm() registers are written
	 * 3. Due to some long delay PSR is re-entered
	 * 4. DC5 entry -> DMC saves the already written new
	 *    _noarm() registers and the old not yet written
	 *    _arm() registers
	 * 5. DC5 exit -> DMC restores a mixture of old and
	 *    new register values and arms the update
	 * 6. PSR exit -> hardware latches a mixture of old and
	 *    new register values -> corrupted frame, or worse
	 * 7. New _arm() registers are finally written
	 * 8. Hardware finally latches a complete set of new
	 *    register values, and subsequent frames will be OK again
	 *
	 * Also note that due to the pipe CSC hardware issues on
	 * SKL/GLK DC states must remain off until the pipe CSC
	 * state readout has happened. Otherwise we risk corrupting
	 * the CSC latched register values with the readout (see
	 * skl_read_csc() and skl_color_commit_noarm()).
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);

	intel_atomic_prepare_plane_clear_colors(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    intel_crtc_needs_fastset(new_crtc_state))
			intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
	}

	intel_commit_modeset_disables(state);

	intel_dp_tunnel_atomic_alloc_bw(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	/*
	 * In XE_LPD+, pmdemand combines many parameters such as the voltage
	 * index, PLLs, cdclk frequency, QGV point selection parameter, etc.
	 * The voltage index and the cdclk/ddiclk frequencies are supposed to
	 * be configured before the cdclk config is set.
	 */
	intel_pmdemand_pre_plane_update(state);

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.funcs.display->commit_modeset_enables(state);

	if (state->modeset)
		intel_set_cdclk_post_plane_update(state);

	intel_wait_for_vblank_workers(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_disable_flip_done(state, crtc);

		intel_color_wait_commit(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		intel_optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(state, crtc);

		intel_post_plane_update_after_readout(state, crtc);

		/*
		 * DSB cleanup is done in cleanup_work, aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done, and later do the dsb cleanup in cleanup_work.
		 *
		 * FIXME get rid of this funny new->old swapping
		 */
		old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank);
		old_crtc_state->dsb_color_commit = fetch_and_zero(&new_crtc_state->dsb_color_commit);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);
	intel_pmdemand_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);
	intel_atomic_global_state_commit_done(state);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
	}

	/*
	 * Delay re-enabling DC states by 17 ms to avoid the off->on->off
	 * toggling overhead at and above 60 FPS.
	 */
	intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17);
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker so as not
	 * to impede the current task (userspace for blocking modesets) that
	 * is executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}

static void intel_atomic_commit_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);

	intel_atomic_commit_tail(state);
}

static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}

static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock)
{
	int ret;

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (ret)
		return ret;

	ret = intel_atomic_global_state_setup_commit(state);
	if (ret)
		return ret;

	return 0;
}

static int intel_atomic_swap_state(struct intel_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_swap_state(&state->base, true);
	if (ret)
		return ret;

	intel_atomic_swap_global_state(state);

	intel_shared_dpll_swap_state(state);

	intel_atomic_track_fbs(state);

	return 0;
}
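
/*
 * intel_atomic_commit() below dispatches the commit one of three ways:
 * nonblocking modesets are queued on the dedicated modeset workqueue,
 * nonblocking flips on the flip workqueue, and blocking commits run
 * intel_atomic_commit_tail() directly (flushing the modeset workqueue
 * first when the commit itself is a modeset, so a still-pending
 * nonblocking modeset cannot be reordered past it).
 */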

int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
			bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = intel_atomic_setup_commit(state, nonblock);
	if (!ret)
		ret = intel_atomic_swap_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_color_cleanup_commit(new_crtc_state);

		drm_atomic_helper_unprepare_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	if (nonblock && state->modeset) {
		queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
	} else {
		if (state->modeset)
			flush_workqueue(dev_priv->display.wq.modeset);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
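
/*
 * Note that intel_atomic_commit() is wired up elsewhere in the driver as
 * the drm_mode_config_funcs ->atomic_commit() hook, so both userspace
 * atomic ioctls and the legacy entry points funnel through the paths
 * above.
 */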

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}

static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	u32 possible_clones = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			possible_clones |= drm_encoder_mask(&source_encoder->base);
	}

	return possible_clones;
}

static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc;
	u32 possible_crtcs = 0;

	for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
		possible_crtcs |= drm_crtc_mask(&crtc->base);

	return possible_crtcs;
}

static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9)
		return false;

	if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->display.vbt.int_crt_support)
		return false;

	return true;
}

bool assert_port_valid(struct drm_i915_private *i915, enum port port)
{
	return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)),
			 "Platform does not support port %c\n", port_name(port));
}
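
/*
 * A typical caller is expected to bail out when the port check fails,
 * along these lines (sketch):
 *
 *	if (!assert_port_valid(i915, port))
 *		return;
 */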

void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(display);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (HAS_DDI(dev_priv)) {
		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		intel_bios_for_each_encoder(display, intel_ddi_init);

		if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
			vlv_dsi_init(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			g4x_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplexes with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both the
		 * strap and the VBT for the presence of the port. Additionally
		 * we can't trust the port type the VBT declares, as we've seen
		 * at least HDMI ports that the VBT claims are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(display, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(display, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(display, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X, SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			g4x_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(display);
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}

static int max_dotclock(struct drm_i915_private *i915)
{
	int max_dotclock = i915->display.cdclk.max_dotclk_freq;

	/* icl+ might use the joiner */
	if (HAS_BIGJOINER(i915))
		max_dotclock *= 2;

	return max_dotclock;
}
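
/*
 * E.g. with a hypothetical max_dotclk_freq of 540000 kHz, a platform
 * with the big joiner would pass modes of up to a 1080000 kHz dotclock
 * through the MODE_CLOCK_HIGH check in intel_mode_valid() below.
 */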

enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/*
	 * Reject clearly excessive dotclocks early to
	 * avoid having to worry about huge integers later.
	 */
	if (mode->clock > max_dotclock(dev_priv))
		return MODE_CLOCK_HIGH;

	/* Transcoder timing limits */
	if (DISPLAY_VER(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}
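
/*
 * E.g. a hypothetical 1920x1080 mode with an htotal of only 1944 would
 * be rejected below on ilk+ (horizontal blanking of 24 < 32), even
 * though it passes the general transcoder limits above.
 */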

enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
						     const struct drm_display_mode *mode)
{
	/*
	 * Additional transcoder timing limits,
	 * excluding BXT/GLK DSI transcoders.
	 */
	if (DISPLAY_VER(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode,
				int num_joined_pipes)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
	 * too big for that.
	 */
	if (DISPLAY_VER(dev_priv) >= 11) {
		plane_width_max = 5120 * num_joined_pipes;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

static const struct intel_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
	.fixup_initial_plane_config = skl_fixup_initial_plane_config,
};

static const struct intel_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9) {
		dev_priv->display.funcs.display = &skl_display_funcs;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.funcs.display = &ddi_display_funcs;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.funcs.display = &pch_split_display_funcs;
	} else if (IS_CHERRYVIEW(dev_priv) ||
		   IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.funcs.display = &vlv_display_funcs;
	} else {
		dev_priv->display.funcs.display = &i9xx_display_funcs;
	}
}
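
/*
 * E.g. a DISPLAY_VER() 12 platform takes the first branch above, and
 * thus commits through skl_commit_modeset_enables() with the DDB
 * reallocation ordering that entails.
 */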

int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
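
/*
 * The force-pipe quirk below programs the standard 640x480@60 VGA
 * timings (hactive 640 / htotal 800, vactive 480 / vtotal 525) with a
 * nominal 25175 kHz dotclock; the PLL settings actually yield 25154 kHz,
 * which is what the drm_WARN_ON() in i830_enable_pipe() cross-checks.
 */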

void i830_enable_pipe(struct intel_display *display, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	enum transcoder cpu_transcoder = (enum transcoder)pipe;
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	drm_WARN_ON(display->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(display->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder),
		       HACTIVE(640 - 1) | HTOTAL(800 - 1));
	intel_de_write(display, TRANS_HBLANK(display, cpu_transcoder),
		       HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
	intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder),
		       HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
	intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
		       VACTIVE(480 - 1) | VTOTAL(525 - 1));
	intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
		       VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
	intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder),
		       VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
	intel_de_write(display, PIPESRC(display, pipe),
		       PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));

	intel_de_write(display, FP0(pipe), fp);
	intel_de_write(display, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(display, DPLL(display, pipe),
		       dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(display, DPLL(display, pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(display, DPLL(display, pipe));
	udelay(150);

	/*
	 * The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(display, DPLL(display, pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(display, DPLL(display, pipe), dpll);
		intel_de_posting_read(display, DPLL(display, pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(display, TRANSCONF(display, pipe), TRANSCONF_ENABLE);
	intel_de_posting_read(display, TRANSCONF(display, pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}

void i830_disable_pipe(struct intel_display *display, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

	drm_dbg_kms(display->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(display->drm,
		    intel_de_read(display, DSPCNTR(display, PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, DSPCNTR(display, PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, DSPCNTR(display, PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, CURCNTR(display, PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, CURCNTR(display, PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(display, TRANSCONF(display, pipe), 0);
	intel_de_posting_read(display, TRANSCONF(display, pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(display, DPLL(display, pipe));
}

void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func &&
		    cancel_work_sync(&connector->modeset_retry_work))
			drm_connector_put(&connector->base);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
}