/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dma-resv.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bo.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_cursor_regs.h"
#include "intel_cx0_phy.h"
#include "intel_cursor.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_tdf.h"
#include "intel_tv.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
#include "intel_vga.h"
#include "intel_vrr.h"
#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"

static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
			      const struct intel_crtc_state *crtc_state);

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
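/*
 * Worked example (illustrative numbers, not from bspec): with
 * ref_freq = 1600000 kHz (the 1.6 GHz HPLL VCO) and a divider field
 * of 7, the result is DIV_ROUND_CLOSEST(1600000 << 1, 7 + 1) =
 * 400000 kHz, i.e. the CCK clock runs at ref_freq * 2 / (divider + 1).
 */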
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DUPS1_GATING_DIS | DUPS2_GATING_DIS,
		     enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DPFR_GATING_DIS,
		     enable ? DPFR_GATING_DIS : 0);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

static enum pipe joiner_primary_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->joiner_pipes) - 1;
}

/*
 * The following helper functions, despite being named for bigjoiner,
 * are applicable to both bigjoiner and uncompressed joiner configurations.
 */
static bool is_bigjoiner(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->joiner_pipes) >= 2;
}

static u8 bigjoiner_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!is_bigjoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b01010101 << joiner_primary_pipe(crtc_state));
}

static unsigned int bigjoiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!is_bigjoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b10101010 << joiner_primary_pipe(crtc_state));
}

bool intel_crtc_is_bigjoiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return false;

	return BIT(crtc->pipe) & bigjoiner_primary_pipes(crtc_state);
}

bool intel_crtc_is_bigjoiner_secondary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return false;

	return BIT(crtc->pipe) & bigjoiner_secondary_pipes(crtc_state);
}

u8 _intel_modeset_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return BIT(crtc->pipe);

	return bigjoiner_primary_pipes(crtc_state);
}

u8 _intel_modeset_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	return bigjoiner_secondary_pipes(crtc_state);
}

bool intel_crtc_is_ultrajoiner(const struct intel_crtc_state *crtc_state)
{
	return intel_crtc_num_joined_pipes(crtc_state) >= 4;
}

static u8 ultrajoiner_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!intel_crtc_is_ultrajoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b00010001 << joiner_primary_pipe(crtc_state));
}

bool intel_crtc_is_ultrajoiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return intel_crtc_is_ultrajoiner(crtc_state) &&
	       BIT(crtc->pipe) & ultrajoiner_primary_pipes(crtc_state);
}
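/*
 * Worked example (illustrative): with joiner_pipes = 0b1111 (pipes A-D
 * joined in one ultrajoiner configuration), joiner_primary_pipe() is
 * PIPE_A, so:
 *   bigjoiner_primary_pipes()   = 0b1111 & 0b01010101 = pipes A and C
 *   bigjoiner_secondary_pipes() = 0b1111 & 0b10101010 = pipes B and D
 *   ultrajoiner_primary_pipes() = 0b1111 & 0b00010001 = pipe A only
 */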
/*
 * The ultrajoiner enable bit doesn't seem to follow primary/secondary
 * logic or any other logic, so let's just add a helper function to at
 * least hide this hassle.
 */
static u8 ultrajoiner_enable_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!intel_crtc_is_ultrajoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b01110111 << joiner_primary_pipe(crtc_state));
}

bool intel_crtc_ultrajoiner_enable_needed(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return intel_crtc_is_ultrajoiner(crtc_state) &&
	       BIT(crtc->pipe) & ultrajoiner_enable_pipes(crtc_state);
}

u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->joiner_pipes)
		return crtc_state->joiner_pipes & ~BIT(joiner_primary_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_joiner_secondary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe != joiner_primary_pipe(crtc_state);
}

bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe == joiner_primary_pipe(crtc_state);
}

int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(intel_crtc_joined_pipe_mask(crtc_state));
}

u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return BIT(crtc->pipe) | crtc_state->joiner_pipes;
}

struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (intel_crtc_is_joiner_secondary(crtc_state))
		return intel_crtc_for_pipe(display, joiner_primary_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
					    TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	struct intel_display *display = &dev_priv->display;
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv,
					TRANSCONF(dev_priv, cpu_transcoder));
		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "transcoder %s assertion failure (expected %s, current %s)\n",
				 transcoder_name(cpu_transcoder), str_on_off(state),
				 str_on_off(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	struct intel_display *display = to_intel_display(plane->base.dev);
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "%s assertion failure (expected %s, current %s)\n",
				 plane->base.name, str_on_off(state),
				 str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void vlv_wait_port_ready(struct intel_display *display,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(display, 0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(display, 0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait(display, dpll_reg, port_mask, expected_mask, 1000))
		drm_WARN(display->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(display, dpll_reg) & port_mask,
			 expected_mask);
}

void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(dev_priv, pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	if (DISPLAY_VER(dev_priv) >= 14) {
		u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
		u32 set = 0;

		if (DISPLAY_VER(dev_priv) == 14)
			set |= DP_FEC_BS_JITTER_WA;

		intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
			     clear, set);
	}

	val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(dev_priv) >= 13 &&
	    new_crtc_state->dsc.compression_enable) {
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
		val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
				      TRANSCONF_PIXEL_COUNT_SCALING_X4);
	}

	intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
		       val | TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));

	/*
	 * Until the pipe starts, PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on().
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
616 */ 617 if (old_crtc_state->double_wide) 618 val &= ~TRANSCONF_DOUBLE_WIDE; 619 620 /* Don't disable pipe or pipe PLLs if needed */ 621 if (!IS_I830(dev_priv)) 622 val &= ~TRANSCONF_ENABLE; 623 624 /* Wa_1409098942:adlp+ */ 625 if (DISPLAY_VER(dev_priv) >= 13 && 626 old_crtc_state->dsc.compression_enable) 627 val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK; 628 629 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val); 630 631 if (DISPLAY_VER(dev_priv) >= 12) 632 intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 633 FECSTALL_DIS_DPTSTREAM_DPTTG, 0); 634 635 if ((val & TRANSCONF_ENABLE) == 0) 636 intel_wait_for_pipe_off(old_crtc_state); 637 } 638 639 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 640 { 641 unsigned int size = 0; 642 int i; 643 644 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) 645 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width; 646 647 return size; 648 } 649 650 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info) 651 { 652 unsigned int size = 0; 653 int i; 654 655 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { 656 unsigned int plane_size; 657 658 if (rem_info->plane[i].linear) 659 plane_size = rem_info->plane[i].size; 660 else 661 plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height; 662 663 if (plane_size == 0) 664 continue; 665 666 if (rem_info->plane_alignment) 667 size = ALIGN(size, rem_info->plane_alignment); 668 669 size += plane_size; 670 } 671 672 return size; 673 } 674 675 bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) 676 { 677 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 678 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 679 680 return DISPLAY_VER(dev_priv) < 4 || 681 (plane->fbc && !plane_state->no_fbc_reason && 682 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL); 683 } 684 685 /* 686 * Convert the x/y offsets into a linear offset. 687 * Only valid with 0/180 degree rotation, which is fine since linear 688 * offset is only used with linear buffers on pre-hsw and tiled buffers 689 * with gen2/3, and 90/270 degree rotations isn't supported on any of them. 690 */ 691 u32 intel_fb_xy_to_linear(int x, int y, 692 const struct intel_plane_state *state, 693 int color_plane) 694 { 695 const struct drm_framebuffer *fb = state->hw.fb; 696 unsigned int cpp = fb->format->cpp[color_plane]; 697 unsigned int pitch = state->view.color_plane[color_plane].mapping_stride; 698 699 return y * pitch + x * cpp; 700 } 701 702 /* 703 * Add the x/y offsets derived from fb->offsets[] to the user 704 * specified plane src x/y offsets. The resulting x/y offsets 705 * specify the start of scanout from the beginning of the gtt mapping. 706 */ 707 void intel_add_fb_offsets(int *x, int *y, 708 const struct intel_plane_state *state, 709 int color_plane) 710 711 { 712 *x += state->view.color_plane[color_plane].x; 713 *y += state->view.color_plane[color_plane].y; 714 } 715 716 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, 717 u32 pixel_format, u64 modifier) 718 { 719 struct intel_crtc *crtc; 720 struct intel_plane *plane; 721 722 if (!HAS_DISPLAY(dev_priv)) 723 return 0; 724 725 /* 726 * We assume the primary plane for pipe A has 727 * the highest stride limits of them all, 728 * if in case pipe A is disabled, use the first pipe from pipe_mask. 
729 */ 730 crtc = intel_first_crtc(dev_priv); 731 if (!crtc) 732 return 0; 733 734 plane = to_intel_plane(crtc->base.primary); 735 736 return plane->max_stride(plane, pixel_format, modifier, 737 DRM_MODE_ROTATE_0); 738 } 739 740 void intel_set_plane_visible(struct intel_crtc_state *crtc_state, 741 struct intel_plane_state *plane_state, 742 bool visible) 743 { 744 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 745 746 plane_state->uapi.visible = visible; 747 748 if (visible) 749 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base); 750 else 751 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base); 752 } 753 754 void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state) 755 { 756 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 757 struct drm_plane *plane; 758 759 /* 760 * Active_planes aliases if multiple "primary" or cursor planes 761 * have been used on the same (or wrong) pipe. plane_mask uses 762 * unique ids, hence we can use that to reconstruct active_planes. 763 */ 764 crtc_state->enabled_planes = 0; 765 crtc_state->active_planes = 0; 766 767 drm_for_each_plane_mask(plane, &dev_priv->drm, 768 crtc_state->uapi.plane_mask) { 769 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id); 770 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); 771 } 772 } 773 774 void intel_plane_disable_noatomic(struct intel_crtc *crtc, 775 struct intel_plane *plane) 776 { 777 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 778 struct intel_crtc_state *crtc_state = 779 to_intel_crtc_state(crtc->base.state); 780 struct intel_plane_state *plane_state = 781 to_intel_plane_state(plane->base.state); 782 783 drm_dbg_kms(&dev_priv->drm, 784 "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n", 785 plane->base.base.id, plane->base.name, 786 crtc->base.base.id, crtc->base.name); 787 788 intel_set_plane_visible(crtc_state, plane_state, false); 789 intel_plane_fixup_bitmasks(crtc_state); 790 crtc_state->data_rate[plane->id] = 0; 791 crtc_state->data_rate_y[plane->id] = 0; 792 crtc_state->rel_data_rate[plane->id] = 0; 793 crtc_state->rel_data_rate_y[plane->id] = 0; 794 crtc_state->min_cdclk[plane->id] = 0; 795 796 if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 && 797 hsw_ips_disable(crtc_state)) { 798 crtc_state->ips_enabled = false; 799 intel_crtc_wait_for_next_vblank(crtc); 800 } 801 802 /* 803 * Vblank time updates from the shadow to live plane control register 804 * are blocked if the memory self-refresh mode is active at that 805 * moment. So to make sure the plane gets truly disabled, disable 806 * first the self-refresh mode. The self-refresh enable bit in turn 807 * will be checked/applied by the HW only at the next frame start 808 * event which is after the vblank start event, so we need to have a 809 * wait-for-vblank between disabling the plane and the pipe. 810 */ 811 if (HAS_GMCH(dev_priv) && 812 intel_set_memory_cxsr(dev_priv, false)) 813 intel_crtc_wait_for_next_vblank(crtc); 814 815 /* 816 * Gen2 reports pipe underruns whenever all planes are disabled. 817 * So disable underrun reporting before all the planes get disabled. 
818 */ 819 if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes) 820 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 821 822 intel_plane_disable_arm(NULL, plane, crtc_state); 823 intel_crtc_wait_for_next_vblank(crtc); 824 } 825 826 unsigned int 827 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state) 828 { 829 int x = 0, y = 0; 830 831 intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 832 plane_state->view.color_plane[0].offset, 0); 833 834 return y; 835 } 836 837 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state) 838 { 839 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 840 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 841 enum pipe pipe = crtc->pipe; 842 u32 tmp; 843 844 tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe)); 845 846 /* 847 * Display WA #1153: icl 848 * enable hardware to bypass the alpha math 849 * and rounding for per-pixel values 00 and 0xff 850 */ 851 tmp |= PER_PIXEL_ALPHA_BYPASS_EN; 852 /* 853 * Display WA # 1605353570: icl 854 * Set the pixel rounding bit to 1 for allowing 855 * passthrough of Frame buffer pixels unmodified 856 * across pipe 857 */ 858 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; 859 860 /* 861 * Underrun recovery must always be disabled on display 13+. 862 * DG2 chicken bit meaning is inverted compared to other platforms. 863 */ 864 if (IS_DG2(dev_priv)) 865 tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2; 866 else if ((DISPLAY_VER(dev_priv) >= 13) && (DISPLAY_VER(dev_priv) < 30)) 867 tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP; 868 869 /* Wa_14010547955:dg2 */ 870 if (IS_DG2(dev_priv)) 871 tmp |= DG2_RENDER_CCSTAG_4_3_EN; 872 873 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp); 874 } 875 876 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) 877 { 878 struct drm_crtc *crtc; 879 bool cleanup_done; 880 881 drm_for_each_crtc(crtc, &dev_priv->drm) { 882 struct drm_crtc_commit *commit; 883 spin_lock(&crtc->commit_lock); 884 commit = list_first_entry_or_null(&crtc->commit_list, 885 struct drm_crtc_commit, commit_entry); 886 cleanup_done = commit ? 887 try_wait_for_completion(&commit->cleanup_done) : true; 888 spin_unlock(&crtc->commit_lock); 889 890 if (cleanup_done) 891 continue; 892 893 intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc)); 894 895 return true; 896 } 897 898 return false; 899 } 900 901 /* 902 * Finds the encoder associated with the given CRTC. This can only be 903 * used when we know that the CRTC isn't feeding multiple encoders! 
904 */ 905 struct intel_encoder * 906 intel_get_crtc_new_encoder(const struct intel_atomic_state *state, 907 const struct intel_crtc_state *crtc_state) 908 { 909 const struct drm_connector_state *connector_state; 910 const struct drm_connector *connector; 911 struct intel_encoder *encoder = NULL; 912 struct intel_crtc *primary_crtc; 913 int num_encoders = 0; 914 int i; 915 916 primary_crtc = intel_primary_crtc(crtc_state); 917 918 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 919 if (connector_state->crtc != &primary_crtc->base) 920 continue; 921 922 encoder = to_intel_encoder(connector_state->best_encoder); 923 num_encoders++; 924 } 925 926 drm_WARN(state->base.dev, num_encoders != 1, 927 "%d encoders for pipe %c\n", 928 num_encoders, pipe_name(primary_crtc->pipe)); 929 930 return encoder; 931 } 932 933 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) 934 { 935 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 936 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 937 const struct drm_rect *dst = &crtc_state->pch_pfit.dst; 938 enum pipe pipe = crtc->pipe; 939 int width = drm_rect_width(dst); 940 int height = drm_rect_height(dst); 941 int x = dst->x1; 942 int y = dst->y1; 943 944 if (!crtc_state->pch_pfit.enabled) 945 return; 946 947 /* Force use of hard-coded filter coefficients 948 * as some pre-programmed values are broken, 949 * e.g. x201. 950 */ 951 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 952 intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE | 953 PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe)); 954 else 955 intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE | 956 PF_FILTER_MED_3x3); 957 intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 958 PF_WIN_XPOS(x) | PF_WIN_YPOS(y)); 959 intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 960 PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height)); 961 } 962 963 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc) 964 { 965 if (crtc->overlay) 966 (void) intel_overlay_switch_off(crtc->overlay); 967 968 /* Let userspace switch the overlay on again. In most cases userspace 969 * has to recompute where to put it anyway. 970 */ 971 } 972 973 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state) 974 { 975 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 976 977 if (!crtc_state->nv12_planes) 978 return false; 979 980 /* WA Display #0827: Gen9:all */ 981 if (DISPLAY_VER(dev_priv) == 9) 982 return true; 983 984 return false; 985 } 986 987 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state) 988 { 989 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 990 991 /* Wa_2006604312:icl,ehl */ 992 if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11) 993 return true; 994 995 return false; 996 } 997 998 static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state) 999 { 1000 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1001 1002 /* Wa_1604331009:icl,jsl,ehl */ 1003 if (is_hdr_mode(crtc_state) && 1004 crtc_state->active_planes & BIT(PLANE_CURSOR) && 1005 DISPLAY_VER(dev_priv) == 11) 1006 return true; 1007 1008 return false; 1009 } 1010 1011 static void intel_async_flip_vtd_wa(struct drm_i915_private *i915, 1012 enum pipe pipe, bool enable) 1013 { 1014 if (DISPLAY_VER(i915) == 9) { 1015 /* 1016 * "Plane N strech max must be programmed to 11b (x1) 1017 * when Async flips are enabled on that plane." 
1018 */ 1019 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe), 1020 SKL_PLANE1_STRETCH_MAX_MASK, 1021 enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8); 1022 } else { 1023 /* Also needed on HSW/BDW albeit undocumented */ 1024 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe), 1025 HSW_PRI_STRETCH_MAX_MASK, 1026 enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8); 1027 } 1028 } 1029 1030 static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state) 1031 { 1032 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 1033 1034 return crtc_state->uapi.async_flip && i915_vtd_active(i915) && 1035 (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915)); 1036 } 1037 1038 static void intel_encoders_audio_enable(struct intel_atomic_state *state, 1039 struct intel_crtc *crtc) 1040 { 1041 const struct intel_crtc_state *crtc_state = 1042 intel_atomic_get_new_crtc_state(state, crtc); 1043 const struct drm_connector_state *conn_state; 1044 struct drm_connector *conn; 1045 int i; 1046 1047 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1048 struct intel_encoder *encoder = 1049 to_intel_encoder(conn_state->best_encoder); 1050 1051 if (conn_state->crtc != &crtc->base) 1052 continue; 1053 1054 if (encoder->audio_enable) 1055 encoder->audio_enable(encoder, crtc_state, conn_state); 1056 } 1057 } 1058 1059 static void intel_encoders_audio_disable(struct intel_atomic_state *state, 1060 struct intel_crtc *crtc) 1061 { 1062 const struct intel_crtc_state *old_crtc_state = 1063 intel_atomic_get_old_crtc_state(state, crtc); 1064 const struct drm_connector_state *old_conn_state; 1065 struct drm_connector *conn; 1066 int i; 1067 1068 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 1069 struct intel_encoder *encoder = 1070 to_intel_encoder(old_conn_state->best_encoder); 1071 1072 if (old_conn_state->crtc != &crtc->base) 1073 continue; 1074 1075 if (encoder->audio_disable) 1076 encoder->audio_disable(encoder, old_crtc_state, old_conn_state); 1077 } 1078 } 1079 1080 #define is_enabling(feature, old_crtc_state, new_crtc_state) \ 1081 ((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \ 1082 (new_crtc_state)->feature) 1083 #define is_disabling(feature, old_crtc_state, new_crtc_state) \ 1084 ((old_crtc_state)->feature && \ 1085 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state))) 1086 1087 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, 1088 const struct intel_crtc_state *new_crtc_state) 1089 { 1090 if (!new_crtc_state->hw.active) 1091 return false; 1092 1093 return is_enabling(active_planes, old_crtc_state, new_crtc_state); 1094 } 1095 1096 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state, 1097 const struct intel_crtc_state *new_crtc_state) 1098 { 1099 if (!old_crtc_state->hw.active) 1100 return false; 1101 1102 return is_disabling(active_planes, old_crtc_state, new_crtc_state); 1103 } 1104 1105 static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state, 1106 const struct intel_crtc_state *new_crtc_state) 1107 { 1108 return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline || 1109 old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin || 1110 old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax || 1111 old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband || 1112 old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full; 1113 } 1114 1115 static bool cmrr_params_changed(const struct 
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline ||
		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
}

static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
				const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->cmrr.cmrr_m != new_crtc_state->cmrr.cmrr_m ||
		old_crtc_state->cmrr.cmrr_n != new_crtc_state->cmrr.cmrr_n;
}

static bool intel_crtc_vrr_enabling(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
			   const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

#undef is_disabling
#undef is_enabling
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_post_plane_update(state, crtc);

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);

	if (audio_enabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_enable(state, crtc);
}

static void intel_post_plane_update_after_readout(struct intel_atomic_state *state,
						  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
	hsw_ips_post_update(state, crtc);

	/*
	 * Activate DRRS after state readout to avoid
	 * dp_m_n vs. dp_m2_n2 confusion on BDW+.
	 */
	intel_drrs_activate(new_crtc_state);
}

static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}
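/*
 * Editorial note: disable_async_flip_planes below is old & ~new, i.e.
 * the planes that had async flips armed in the old state but no longer
 * do. E.g. old = BIT(PLANE_1) | BIT(PLANE_2) and new = BIT(PLANE_2)
 * leaves only BIT(PLANE_1) needing the toggle workaround.
 */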
1294 */ 1295 intel_plane_async_flip(NULL, plane, 1296 old_crtc_state, old_plane_state, false); 1297 need_vbl_wait = true; 1298 } 1299 } 1300 1301 if (need_vbl_wait) 1302 intel_crtc_wait_for_next_vblank(crtc); 1303 } 1304 1305 static void intel_pre_plane_update(struct intel_atomic_state *state, 1306 struct intel_crtc *crtc) 1307 { 1308 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 1309 const struct intel_crtc_state *old_crtc_state = 1310 intel_atomic_get_old_crtc_state(state, crtc); 1311 const struct intel_crtc_state *new_crtc_state = 1312 intel_atomic_get_new_crtc_state(state, crtc); 1313 enum pipe pipe = crtc->pipe; 1314 1315 if (intel_crtc_vrr_disabling(state, crtc)) { 1316 intel_vrr_disable(old_crtc_state); 1317 intel_crtc_update_active_timings(old_crtc_state, false); 1318 } 1319 1320 if (audio_disabling(old_crtc_state, new_crtc_state)) 1321 intel_encoders_audio_disable(state, crtc); 1322 1323 intel_drrs_deactivate(old_crtc_state); 1324 1325 intel_psr_pre_plane_update(state, crtc); 1326 1327 if (hsw_ips_pre_update(state, crtc)) 1328 intel_crtc_wait_for_next_vblank(crtc); 1329 1330 if (intel_fbc_pre_update(state, crtc)) 1331 intel_crtc_wait_for_next_vblank(crtc); 1332 1333 if (!needs_async_flip_vtd_wa(old_crtc_state) && 1334 needs_async_flip_vtd_wa(new_crtc_state)) 1335 intel_async_flip_vtd_wa(dev_priv, pipe, true); 1336 1337 /* Display WA 827 */ 1338 if (!needs_nv12_wa(old_crtc_state) && 1339 needs_nv12_wa(new_crtc_state)) 1340 skl_wa_827(dev_priv, pipe, true); 1341 1342 /* Wa_2006604312:icl,ehl */ 1343 if (!needs_scalerclk_wa(old_crtc_state) && 1344 needs_scalerclk_wa(new_crtc_state)) 1345 icl_wa_scalerclkgating(dev_priv, pipe, true); 1346 1347 /* Wa_1604331009:icl,jsl,ehl */ 1348 if (!needs_cursorclk_wa(old_crtc_state) && 1349 needs_cursorclk_wa(new_crtc_state)) 1350 icl_wa_cursorclkgating(dev_priv, pipe, true); 1351 1352 /* 1353 * Vblank time updates from the shadow to live plane control register 1354 * are blocked if the memory self-refresh mode is active at that 1355 * moment. So to make sure the plane gets truly disabled, disable 1356 * first the self-refresh mode. The self-refresh enable bit in turn 1357 * will be checked/applied by the HW only at the next frame start 1358 * event which is after the vblank start event, so we need to have a 1359 * wait-for-vblank between disabling the plane and the pipe. 1360 */ 1361 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active && 1362 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) 1363 intel_crtc_wait_for_next_vblank(crtc); 1364 1365 /* 1366 * IVB workaround: must disable low power watermarks for at least 1367 * one frame before enabling scaling. LP watermarks can be re-enabled 1368 * when scaling is disabled. 1369 * 1370 * WaCxSRDisabledForSpriteScaling:ivb 1371 */ 1372 if (!HAS_GMCH(dev_priv) && old_crtc_state->hw.active && 1373 new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv)) 1374 intel_crtc_wait_for_next_vblank(crtc); 1375 1376 /* 1377 * If we're doing a modeset we don't need to do any 1378 * pre-vblank watermark programming here. 1379 */ 1380 if (!intel_crtc_needs_modeset(new_crtc_state)) { 1381 /* 1382 * For platforms that support atomic watermarks, program the 1383 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these 1384 * will be the intermediate values that are safe for both pre- and 1385 * post- vblank; when vblank happens, the 'active' values will be set 1386 * to the final 'target' values and we'll do this again to get the 1387 * optimal watermarks. 
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (intel_crtc_vrr_disabling(state, crtc)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	if (audio_disabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_disable(state, crtc);

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (!HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where the async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned int fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(NULL, plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}
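/*
 * Editorial note: the intel_encoders_*() helpers below all follow the
 * same pattern: walk the connectors in the atomic state, skip those not
 * bound to this CRTC, and invoke the corresponding optional encoder hook
 * (pre_pll_enable, pre_enable, enable, disable, post_disable,
 * post_pll_disable, update_pipe).
 */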
1457 */ 1458 if (i915->display.dpll.mgr) { 1459 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 1460 if (intel_crtc_needs_modeset(new_crtc_state)) 1461 continue; 1462 1463 new_crtc_state->shared_dpll = old_crtc_state->shared_dpll; 1464 new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state; 1465 } 1466 } 1467 } 1468 1469 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state, 1470 struct intel_crtc *crtc) 1471 { 1472 const struct intel_crtc_state *crtc_state = 1473 intel_atomic_get_new_crtc_state(state, crtc); 1474 const struct drm_connector_state *conn_state; 1475 struct drm_connector *conn; 1476 int i; 1477 1478 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1479 struct intel_encoder *encoder = 1480 to_intel_encoder(conn_state->best_encoder); 1481 1482 if (conn_state->crtc != &crtc->base) 1483 continue; 1484 1485 if (encoder->pre_pll_enable) 1486 encoder->pre_pll_enable(state, encoder, 1487 crtc_state, conn_state); 1488 } 1489 } 1490 1491 static void intel_encoders_pre_enable(struct intel_atomic_state *state, 1492 struct intel_crtc *crtc) 1493 { 1494 const struct intel_crtc_state *crtc_state = 1495 intel_atomic_get_new_crtc_state(state, crtc); 1496 const struct drm_connector_state *conn_state; 1497 struct drm_connector *conn; 1498 int i; 1499 1500 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1501 struct intel_encoder *encoder = 1502 to_intel_encoder(conn_state->best_encoder); 1503 1504 if (conn_state->crtc != &crtc->base) 1505 continue; 1506 1507 if (encoder->pre_enable) 1508 encoder->pre_enable(state, encoder, 1509 crtc_state, conn_state); 1510 } 1511 } 1512 1513 static void intel_encoders_enable(struct intel_atomic_state *state, 1514 struct intel_crtc *crtc) 1515 { 1516 const struct intel_crtc_state *crtc_state = 1517 intel_atomic_get_new_crtc_state(state, crtc); 1518 const struct drm_connector_state *conn_state; 1519 struct drm_connector *conn; 1520 int i; 1521 1522 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1523 struct intel_encoder *encoder = 1524 to_intel_encoder(conn_state->best_encoder); 1525 1526 if (conn_state->crtc != &crtc->base) 1527 continue; 1528 1529 if (encoder->enable) 1530 encoder->enable(state, encoder, 1531 crtc_state, conn_state); 1532 intel_opregion_notify_encoder(encoder, true); 1533 } 1534 } 1535 1536 static void intel_encoders_disable(struct intel_atomic_state *state, 1537 struct intel_crtc *crtc) 1538 { 1539 const struct intel_crtc_state *old_crtc_state = 1540 intel_atomic_get_old_crtc_state(state, crtc); 1541 const struct drm_connector_state *old_conn_state; 1542 struct drm_connector *conn; 1543 int i; 1544 1545 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 1546 struct intel_encoder *encoder = 1547 to_intel_encoder(old_conn_state->best_encoder); 1548 1549 if (old_conn_state->crtc != &crtc->base) 1550 continue; 1551 1552 intel_opregion_notify_encoder(encoder, false); 1553 if (encoder->disable) 1554 encoder->disable(state, encoder, 1555 old_crtc_state, old_conn_state); 1556 } 1557 } 1558 1559 static void intel_encoders_post_disable(struct intel_atomic_state *state, 1560 struct intel_crtc *crtc) 1561 { 1562 const struct intel_crtc_state *old_crtc_state = 1563 intel_atomic_get_old_crtc_state(state, crtc); 1564 const struct drm_connector_state *old_conn_state; 1565 struct drm_connector *conn; 1566 int i; 1567 1568 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 1569 struct 
static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
	}
}

static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}
1665 */ 1666 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 1667 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 1668 1669 ilk_configure_cpu_transcoder(new_crtc_state); 1670 1671 intel_set_pipe_src_size(new_crtc_state); 1672 1673 crtc->active = true; 1674 1675 intel_encoders_pre_enable(state, crtc); 1676 1677 if (new_crtc_state->has_pch_encoder) { 1678 ilk_pch_pre_enable(state, crtc); 1679 } else { 1680 assert_fdi_tx_disabled(dev_priv, pipe); 1681 assert_fdi_rx_disabled(dev_priv, pipe); 1682 } 1683 1684 ilk_pfit_enable(new_crtc_state); 1685 1686 /* 1687 * On ILK+ LUT must be loaded before the pipe is running but with 1688 * clocks enabled 1689 */ 1690 intel_color_modeset(new_crtc_state); 1691 1692 intel_initial_watermarks(state, crtc); 1693 intel_enable_transcoder(new_crtc_state); 1694 1695 if (new_crtc_state->has_pch_encoder) 1696 ilk_pch_enable(state, crtc); 1697 1698 intel_crtc_vblank_on(new_crtc_state); 1699 1700 intel_encoders_enable(state, crtc); 1701 1702 if (HAS_PCH_CPT(dev_priv)) 1703 intel_wait_for_pipe_scanline_moving(crtc); 1704 1705 /* 1706 * Must wait for vblank to avoid spurious PCH FIFO underruns. 1707 * And a second vblank wait is needed at least on ILK with 1708 * some interlaced HDMI modes. Let's do the double wait always 1709 * in case there are more corner cases we don't know about. 1710 */ 1711 if (new_crtc_state->has_pch_encoder) { 1712 intel_crtc_wait_for_next_vblank(crtc); 1713 intel_crtc_wait_for_next_vblank(crtc); 1714 } 1715 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 1716 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 1717 } 1718 1719 /* Display WA #1180: WaDisableScalarClockGating: glk */ 1720 static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state) 1721 { 1722 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 1723 1724 return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled; 1725 } 1726 1727 static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable) 1728 { 1729 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 1730 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; 1731 1732 intel_de_rmw(i915, CLKGATE_DIS_PSL(crtc->pipe), 1733 mask, enable ? 
mask : 0); 1734 } 1735 1736 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state) 1737 { 1738 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1739 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1740 1741 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe), 1742 HSW_LINETIME(crtc_state->linetime) | 1743 HSW_IPS_LINETIME(crtc_state->ips_linetime)); 1744 } 1745 1746 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) 1747 { 1748 struct intel_display *display = to_intel_display(crtc_state); 1749 1750 intel_de_rmw(display, CHICKEN_TRANS(display, crtc_state->cpu_transcoder), 1751 HSW_FRAME_START_DELAY_MASK, 1752 HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1)); 1753 } 1754 1755 static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 1756 { 1757 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1758 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1759 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 1760 1761 if (crtc_state->has_pch_encoder) { 1762 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1763 &crtc_state->fdi_m_n); 1764 } else if (intel_crtc_has_dp_encoder(crtc_state)) { 1765 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1766 &crtc_state->dp_m_n); 1767 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 1768 &crtc_state->dp_m2_n2); 1769 } 1770 1771 intel_set_transcoder_timings(crtc_state); 1772 if (HAS_VRR(dev_priv)) 1773 intel_vrr_set_transcoder_timings(crtc_state); 1774 1775 if (cpu_transcoder != TRANSCODER_EDP) 1776 intel_de_write(dev_priv, TRANS_MULT(dev_priv, cpu_transcoder), 1777 crtc_state->pixel_multiplier - 1); 1778 1779 hsw_set_frame_start_delay(crtc_state); 1780 1781 hsw_set_transconf(crtc_state); 1782 } 1783 1784 static void hsw_crtc_enable(struct intel_atomic_state *state, 1785 struct intel_crtc *crtc) 1786 { 1787 struct intel_display *display = to_intel_display(state); 1788 const struct intel_crtc_state *new_crtc_state = 1789 intel_atomic_get_new_crtc_state(state, crtc); 1790 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1791 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 1792 struct intel_crtc *pipe_crtc; 1793 int i; 1794 1795 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 1796 return; 1797 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) 1798 intel_dmc_enable_pipe(display, pipe_crtc->pipe); 1799 1800 intel_encoders_pre_pll_enable(state, crtc); 1801 1802 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { 1803 const struct intel_crtc_state *pipe_crtc_state = 1804 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1805 1806 if (pipe_crtc_state->shared_dpll) 1807 intel_enable_shared_dpll(pipe_crtc_state); 1808 } 1809 1810 intel_encoders_pre_enable(state, crtc); 1811 1812 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { 1813 const struct intel_crtc_state *pipe_crtc_state = 1814 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1815 1816 intel_dsc_enable(pipe_crtc_state); 1817 1818 if (HAS_UNCOMPRESSED_JOINER(dev_priv)) 1819 intel_uncompressed_joiner_enable(pipe_crtc_state); 1820 1821 intel_set_pipe_src_size(pipe_crtc_state); 1822 1823 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 1824 bdw_set_pipe_misc(NULL, pipe_crtc_state); 1825 } 1826 1827 if (!transcoder_is_dsi(cpu_transcoder)) 1828 hsw_configure_cpu_transcoder(new_crtc_state); 1829 1830 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, 
new_crtc_state, i) { 1831 const struct intel_crtc_state *pipe_crtc_state = 1832 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1833 1834 pipe_crtc->active = true; 1835 1836 if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) 1837 glk_pipe_scaler_clock_gating_wa(pipe_crtc, true); 1838 1839 if (DISPLAY_VER(dev_priv) >= 9) 1840 skl_pfit_enable(pipe_crtc_state); 1841 else 1842 ilk_pfit_enable(pipe_crtc_state); 1843 1844 /* 1845 * On ILK+ LUT must be loaded before the pipe is running but with 1846 * clocks enabled 1847 */ 1848 intel_color_modeset(pipe_crtc_state); 1849 1850 hsw_set_linetime_wm(pipe_crtc_state); 1851 1852 if (DISPLAY_VER(dev_priv) >= 11) 1853 icl_set_pipe_chicken(pipe_crtc_state); 1854 1855 intel_initial_watermarks(state, pipe_crtc); 1856 } 1857 1858 intel_encoders_enable(state, crtc); 1859 1860 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { 1861 const struct intel_crtc_state *pipe_crtc_state = 1862 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1863 enum pipe hsw_workaround_pipe; 1864 1865 if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) { 1866 intel_crtc_wait_for_next_vblank(pipe_crtc); 1867 glk_pipe_scaler_clock_gating_wa(pipe_crtc, false); 1868 } 1869 1870 /* 1871 * If we change the relative order between pipe/planes 1872 * enabling, we need to change the workaround. 1873 */ 1874 hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe; 1875 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 1876 struct intel_crtc *wa_crtc = 1877 intel_crtc_for_pipe(display, hsw_workaround_pipe); 1878 1879 intel_crtc_wait_for_next_vblank(wa_crtc); 1880 intel_crtc_wait_for_next_vblank(wa_crtc); 1881 } 1882 } 1883 } 1884 1885 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) 1886 { 1887 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 1888 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1889 enum pipe pipe = crtc->pipe; 1890 1891 /* To avoid upsetting the power well on haswell, only disable the pfit if 1892 * it's in use. The hw state code will make sure we get this right. */ 1893 if (!old_crtc_state->pch_pfit.enabled) 1894 return; 1895 1896 intel_de_write_fw(dev_priv, PF_CTL(pipe), 0); 1897 intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0); 1898 intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0); 1899 } 1900 1901 static void ilk_crtc_disable(struct intel_atomic_state *state, 1902 struct intel_crtc *crtc) 1903 { 1904 const struct intel_crtc_state *old_crtc_state = 1905 intel_atomic_get_old_crtc_state(state, crtc); 1906 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1907 enum pipe pipe = crtc->pipe; 1908 1909 /* 1910 * Sometimes spurious CPU pipe underruns happen when the 1911 * pipe is already disabled, but FDI RX/TX is still enabled. 1912 * Happens at least with VGA+HDMI cloning. Suppress them.
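* As in the enable path, reporting is re-armed at the end of the sequence, once the transcoder, FDI and PCH have been shut down.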
1913 */ 1914 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 1915 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 1916 1917 intel_encoders_disable(state, crtc); 1918 1919 intel_crtc_vblank_off(old_crtc_state); 1920 1921 intel_disable_transcoder(old_crtc_state); 1922 1923 ilk_pfit_disable(old_crtc_state); 1924 1925 if (old_crtc_state->has_pch_encoder) 1926 ilk_pch_disable(state, crtc); 1927 1928 intel_encoders_post_disable(state, crtc); 1929 1930 if (old_crtc_state->has_pch_encoder) 1931 ilk_pch_post_disable(state, crtc); 1932 1933 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 1934 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 1935 1936 intel_disable_shared_dpll(old_crtc_state); 1937 } 1938 1939 static void hsw_crtc_disable(struct intel_atomic_state *state, 1940 struct intel_crtc *crtc) 1941 { 1942 struct intel_display *display = to_intel_display(state); 1943 const struct intel_crtc_state *old_crtc_state = 1944 intel_atomic_get_old_crtc_state(state, crtc); 1945 struct intel_crtc *pipe_crtc; 1946 int i; 1947 1948 /* 1949 * FIXME collapse everything to one hook. 1950 * Need care with mst->ddi interactions. 1951 */ 1952 intel_encoders_disable(state, crtc); 1953 intel_encoders_post_disable(state, crtc); 1954 1955 for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { 1956 const struct intel_crtc_state *old_pipe_crtc_state = 1957 intel_atomic_get_old_crtc_state(state, pipe_crtc); 1958 1959 intel_disable_shared_dpll(old_pipe_crtc_state); 1960 } 1961 1962 intel_encoders_post_pll_disable(state, crtc); 1963 1964 for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) 1965 intel_dmc_disable_pipe(display, pipe_crtc->pipe); 1966 } 1967 1968 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) 1969 { 1970 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1971 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1972 1973 if (!crtc_state->gmch_pfit.control) 1974 return; 1975 1976 /* 1977 * The panel fitter should only be adjusted whilst the pipe is disabled, 1978 * according to register description and PRM. 1979 */ 1980 drm_WARN_ON(&dev_priv->drm, 1981 intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)) & PFIT_ENABLE); 1982 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); 1983 1984 intel_de_write(dev_priv, PFIT_PGM_RATIOS(dev_priv), 1985 crtc_state->gmch_pfit.pgm_ratios); 1986 intel_de_write(dev_priv, PFIT_CONTROL(dev_priv), 1987 crtc_state->gmch_pfit.control); 1988 1989 /* Border color in case we don't scale up to the full screen. Black by 1990 * default, change to something else for debugging. */ 1991 intel_de_write(dev_priv, BCLRPAT(dev_priv, crtc->pipe), 0); 1992 } 1993 1994 /* Prefer intel_encoder_is_combo() */ 1995 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) 1996 { 1997 if (phy == PHY_NONE) 1998 return false; 1999 else if (IS_ALDERLAKE_S(dev_priv)) 2000 return phy <= PHY_E; 2001 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) 2002 return phy <= PHY_D; 2003 else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) 2004 return phy <= PHY_C; 2005 else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12)) 2006 return phy <= PHY_B; 2007 else 2008 /* 2009 * DG2 outputs labelled as "combo PHY" in the bspec use 2010 * SNPS PHYs with completely different programming, 2011 * hence we always return false here. 
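* (intel_phy_is_snps() below claims PHY A through E on DG2 instead.)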
2012 */ 2013 return false; 2014 } 2015 2016 /* Prefer intel_encoder_is_tc() */ 2017 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) 2018 { 2019 /* 2020 * Discrete GPU PHYs are not attached to FIAs to support the TC 2021 * subsystem (legacy or non-legacy), and only support native DP/HDMI 2022 */ 2023 if (IS_DGFX(dev_priv)) 2024 return false; 2025 2026 if (DISPLAY_VER(dev_priv) >= 13) 2027 return phy >= PHY_F && phy <= PHY_I; 2028 else if (IS_TIGERLAKE(dev_priv)) 2029 return phy >= PHY_D && phy <= PHY_I; 2030 else if (IS_ICELAKE(dev_priv)) 2031 return phy >= PHY_C && phy <= PHY_F; 2032 2033 return false; 2034 } 2035 2036 /* Prefer intel_encoder_is_snps() */ 2037 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy) 2038 { 2039 /* 2040 * For DG2, and for DG2 only, all four "combo" ports and the TC1 port 2041 * (PHY E) use Synopsys PHYs. See intel_phy_is_tc(). 2042 */ 2043 return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E; 2044 } 2045 2046 /* Prefer intel_encoder_to_phy() */ 2047 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) 2048 { 2049 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD) 2050 return PHY_D + port - PORT_D_XELPD; 2051 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1) 2052 return PHY_F + port - PORT_TC1; 2053 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1) 2054 return PHY_B + port - PORT_TC1; 2055 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1) 2056 return PHY_C + port - PORT_TC1; 2057 else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && 2058 port == PORT_D) 2059 return PHY_A; 2060 2061 return PHY_A + port - PORT_A; 2062 } 2063 2064 /* Prefer intel_encoder_to_tc() */ 2065 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) 2066 { 2067 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) 2068 return TC_PORT_NONE; 2069 2070 if (DISPLAY_VER(dev_priv) >= 12) 2071 return TC_PORT_1 + port - PORT_TC1; 2072 else 2073 return TC_PORT_1 + port - PORT_C; 2074 } 2075 2076 enum phy intel_encoder_to_phy(struct intel_encoder *encoder) 2077 { 2078 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2079 2080 return intel_port_to_phy(i915, encoder->port); 2081 } 2082 2083 bool intel_encoder_is_combo(struct intel_encoder *encoder) 2084 { 2085 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2086 2087 return intel_phy_is_combo(i915, intel_encoder_to_phy(encoder)); 2088 } 2089 2090 bool intel_encoder_is_snps(struct intel_encoder *encoder) 2091 { 2092 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2093 2094 return intel_phy_is_snps(i915, intel_encoder_to_phy(encoder)); 2095 } 2096 2097 bool intel_encoder_is_tc(struct intel_encoder *encoder) 2098 { 2099 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2100 2101 return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder)); 2102 } 2103 2104 enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder) 2105 { 2106 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2107 2108 return intel_port_to_tc(i915, encoder->port); 2109 } 2110 2111 enum intel_display_power_domain 2112 intel_aux_power_domain(struct intel_digital_port *dig_port) 2113 { 2114 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 2115 2116 if (intel_tc_port_in_tbt_alt_mode(dig_port)) 2117 return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch); 2118 2119 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); 2120 } 2121 2122 static void
get_crtc_power_domains(struct intel_crtc_state *crtc_state, 2123 struct intel_power_domain_mask *mask) 2124 { 2125 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2126 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2127 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2128 struct drm_encoder *encoder; 2129 enum pipe pipe = crtc->pipe; 2130 2131 bitmap_zero(mask->bits, POWER_DOMAIN_NUM); 2132 2133 if (!crtc_state->hw.active) 2134 return; 2135 2136 set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits); 2137 set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits); 2138 if (crtc_state->pch_pfit.enabled || 2139 crtc_state->pch_pfit.force_thru) 2140 set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits); 2141 2142 drm_for_each_encoder_mask(encoder, &dev_priv->drm, 2143 crtc_state->uapi.encoder_mask) { 2144 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 2145 2146 set_bit(intel_encoder->power_domain, mask->bits); 2147 } 2148 2149 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 2150 set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits); 2151 2152 if (crtc_state->shared_dpll) 2153 set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits); 2154 2155 if (crtc_state->dsc.compression_enable) 2156 set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits); 2157 } 2158 2159 void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state, 2160 struct intel_power_domain_mask *old_domains) 2161 { 2162 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2163 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2164 enum intel_display_power_domain domain; 2165 struct intel_power_domain_mask domains, new_domains; 2166 2167 get_crtc_power_domains(crtc_state, &domains); 2168 2169 bitmap_andnot(new_domains.bits, 2170 domains.bits, 2171 crtc->enabled_power_domains.mask.bits, 2172 POWER_DOMAIN_NUM); 2173 bitmap_andnot(old_domains->bits, 2174 crtc->enabled_power_domains.mask.bits, 2175 domains.bits, 2176 POWER_DOMAIN_NUM); 2177 2178 for_each_power_domain(domain, &new_domains) 2179 intel_display_power_get_in_set(dev_priv, 2180 &crtc->enabled_power_domains, 2181 domain); 2182 } 2183 2184 void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc, 2185 struct intel_power_domain_mask *domains) 2186 { 2187 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev), 2188 &crtc->enabled_power_domains, 2189 domains); 2190 } 2191 2192 static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 2193 { 2194 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2195 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2196 2197 if (intel_crtc_has_dp_encoder(crtc_state)) { 2198 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 2199 &crtc_state->dp_m_n); 2200 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 2201 &crtc_state->dp_m2_n2); 2202 } 2203 2204 intel_set_transcoder_timings(crtc_state); 2205 2206 i9xx_set_pipeconf(crtc_state); 2207 } 2208 2209 static void valleyview_crtc_enable(struct intel_atomic_state *state, 2210 struct intel_crtc *crtc) 2211 { 2212 const struct intel_crtc_state *new_crtc_state = 2213 intel_atomic_get_new_crtc_state(state, crtc); 2214 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2215 enum pipe pipe = crtc->pipe; 2216 2217 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 2218 return; 2219 2220 i9xx_configure_cpu_transcoder(new_crtc_state); 2221 2222 intel_set_pipe_src_size(new_crtc_state); 2223 2224 intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0); 
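/* Pipe B on CHV has a blender with a programmable background canvas; the writes below select legacy blending and a black canvas. */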
2225 2226 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 2227 intel_de_write(dev_priv, CHV_BLEND(dev_priv, pipe), 2228 CHV_BLEND_LEGACY); 2229 intel_de_write(dev_priv, CHV_CANVAS(dev_priv, pipe), 0); 2230 } 2231 2232 crtc->active = true; 2233 2234 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 2235 2236 intel_encoders_pre_pll_enable(state, crtc); 2237 2238 if (IS_CHERRYVIEW(dev_priv)) 2239 chv_enable_pll(new_crtc_state); 2240 else 2241 vlv_enable_pll(new_crtc_state); 2242 2243 intel_encoders_pre_enable(state, crtc); 2244 2245 i9xx_pfit_enable(new_crtc_state); 2246 2247 intel_color_modeset(new_crtc_state); 2248 2249 intel_initial_watermarks(state, crtc); 2250 intel_enable_transcoder(new_crtc_state); 2251 2252 intel_crtc_vblank_on(new_crtc_state); 2253 2254 intel_encoders_enable(state, crtc); 2255 } 2256 2257 static void i9xx_crtc_enable(struct intel_atomic_state *state, 2258 struct intel_crtc *crtc) 2259 { 2260 const struct intel_crtc_state *new_crtc_state = 2261 intel_atomic_get_new_crtc_state(state, crtc); 2262 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2263 enum pipe pipe = crtc->pipe; 2264 2265 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 2266 return; 2267 2268 i9xx_configure_cpu_transcoder(new_crtc_state); 2269 2270 intel_set_pipe_src_size(new_crtc_state); 2271 2272 crtc->active = true; 2273 2274 if (DISPLAY_VER(dev_priv) != 2) 2275 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 2276 2277 intel_encoders_pre_enable(state, crtc); 2278 2279 i9xx_enable_pll(new_crtc_state); 2280 2281 i9xx_pfit_enable(new_crtc_state); 2282 2283 intel_color_modeset(new_crtc_state); 2284 2285 if (!intel_initial_watermarks(state, crtc)) 2286 intel_update_watermarks(dev_priv); 2287 intel_enable_transcoder(new_crtc_state); 2288 2289 intel_crtc_vblank_on(new_crtc_state); 2290 2291 intel_encoders_enable(state, crtc); 2292 2293 /* prevents spurious underruns */ 2294 if (DISPLAY_VER(dev_priv) == 2) 2295 intel_crtc_wait_for_next_vblank(crtc); 2296 } 2297 2298 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) 2299 { 2300 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 2301 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2302 2303 if (!old_crtc_state->gmch_pfit.control) 2304 return; 2305 2306 assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder); 2307 2308 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n", 2309 intel_de_read(dev_priv, PFIT_CONTROL(dev_priv))); 2310 intel_de_write(dev_priv, PFIT_CONTROL(dev_priv), 0); 2311 } 2312 2313 static void i9xx_crtc_disable(struct intel_atomic_state *state, 2314 struct intel_crtc *crtc) 2315 { 2316 struct intel_display *display = to_intel_display(state); 2317 struct drm_i915_private *dev_priv = to_i915(display->drm); 2318 struct intel_crtc_state *old_crtc_state = 2319 intel_atomic_get_old_crtc_state(state, crtc); 2320 enum pipe pipe = crtc->pipe; 2321 2322 /* 2323 * On gen2 planes are double buffered but the pipe isn't, so we must 2324 * wait for planes to fully turn off before disabling the pipe. 
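* The vblank wait below guarantees the pending plane disables have actually latched before the transcoder is shut down.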
2325 */ 2326 if (DISPLAY_VER(dev_priv) == 2) 2327 intel_crtc_wait_for_next_vblank(crtc); 2328 2329 intel_encoders_disable(state, crtc); 2330 2331 intel_crtc_vblank_off(old_crtc_state); 2332 2333 intel_disable_transcoder(old_crtc_state); 2334 2335 i9xx_pfit_disable(old_crtc_state); 2336 2337 intel_encoders_post_disable(state, crtc); 2338 2339 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { 2340 if (IS_CHERRYVIEW(dev_priv)) 2341 chv_disable_pll(dev_priv, pipe); 2342 else if (IS_VALLEYVIEW(dev_priv)) 2343 vlv_disable_pll(dev_priv, pipe); 2344 else 2345 i9xx_disable_pll(old_crtc_state); 2346 } 2347 2348 intel_encoders_post_pll_disable(state, crtc); 2349 2350 if (DISPLAY_VER(dev_priv) != 2) 2351 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 2352 2353 if (!dev_priv->display.funcs.wm->initial_watermarks) 2354 intel_update_watermarks(dev_priv); 2355 2356 /* clock the pipe down to 640x480@60 to potentially save power */ 2357 if (IS_I830(dev_priv)) 2358 i830_enable_pipe(display, pipe); 2359 } 2360 2361 void intel_encoder_destroy(struct drm_encoder *encoder) 2362 { 2363 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 2364 2365 drm_encoder_cleanup(encoder); 2366 kfree(intel_encoder); 2367 } 2368 2369 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 2370 { 2371 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2372 2373 /* GDG double wide on either pipe, otherwise pipe A only */ 2374 return HAS_DOUBLE_WIDE(dev_priv) && 2375 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 2376 } 2377 2378 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state) 2379 { 2380 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock; 2381 struct drm_rect src; 2382 2383 /* 2384 * We only use IF-ID interlacing. If we ever use 2385 * PF-ID we'll need to adjust the pixel_rate here. 
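* The pch_pfit case below defers to intel_adjusted_rate(), which (roughly speaking) scales the clock by the pipe src to pfit window size ratio to account for downscaling.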
2386 */ 2387 2388 if (!crtc_state->pch_pfit.enabled) 2389 return pixel_rate; 2390 2391 drm_rect_init(&src, 0, 0, 2392 drm_rect_width(&crtc_state->pipe_src) << 16, 2393 drm_rect_height(&crtc_state->pipe_src) << 16); 2394 2395 return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst, 2396 pixel_rate); 2397 } 2398 2399 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode, 2400 const struct drm_display_mode *timings) 2401 { 2402 mode->hdisplay = timings->crtc_hdisplay; 2403 mode->htotal = timings->crtc_htotal; 2404 mode->hsync_start = timings->crtc_hsync_start; 2405 mode->hsync_end = timings->crtc_hsync_end; 2406 2407 mode->vdisplay = timings->crtc_vdisplay; 2408 mode->vtotal = timings->crtc_vtotal; 2409 mode->vsync_start = timings->crtc_vsync_start; 2410 mode->vsync_end = timings->crtc_vsync_end; 2411 2412 mode->flags = timings->flags; 2413 mode->type = DRM_MODE_TYPE_DRIVER; 2414 2415 mode->clock = timings->crtc_clock; 2416 2417 drm_mode_set_name(mode); 2418 } 2419 2420 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 2421 { 2422 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 2423 2424 if (HAS_GMCH(dev_priv)) 2425 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 2426 crtc_state->pixel_rate = 2427 crtc_state->hw.pipe_mode.crtc_clock; 2428 else 2429 crtc_state->pixel_rate = 2430 ilk_pipe_pixel_rate(crtc_state); 2431 } 2432 2433 static void intel_joiner_adjust_timings(const struct intel_crtc_state *crtc_state, 2434 struct drm_display_mode *mode) 2435 { 2436 int num_pipes = intel_crtc_num_joined_pipes(crtc_state); 2437 2438 if (num_pipes == 1) 2439 return; 2440 2441 mode->crtc_clock /= num_pipes; 2442 mode->crtc_hdisplay /= num_pipes; 2443 mode->crtc_hblank_start /= num_pipes; 2444 mode->crtc_hblank_end /= num_pipes; 2445 mode->crtc_hsync_start /= num_pipes; 2446 mode->crtc_hsync_end /= num_pipes; 2447 mode->crtc_htotal /= num_pipes; 2448 } 2449 2450 static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state, 2451 struct drm_display_mode *mode) 2452 { 2453 int overlap = crtc_state->splitter.pixel_overlap; 2454 int n = crtc_state->splitter.link_count; 2455 2456 if (!crtc_state->splitter.enable) 2457 return; 2458 2459 /* 2460 * eDP MSO uses segment timings from EDID for transcoder 2461 * timings, but full mode for everything else. 2462 * 2463 * h_full = (h_segment - pixel_overlap) * link_count 2464 */ 2465 mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n; 2466 mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n; 2467 mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n; 2468 mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n; 2469 mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n; 2470 mode->crtc_htotal = (mode->crtc_htotal - overlap) * n; 2471 mode->crtc_clock *= n; 2472 } 2473 2474 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state) 2475 { 2476 struct drm_display_mode *mode = &crtc_state->hw.mode; 2477 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; 2478 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2479 2480 /* 2481 * Start with the adjusted_mode crtc timings, which 2482 * have been filled with the transcoder timings. 
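* From here pipe_mode gains the MSO-expanded timings, adjusted_mode keeps its raw transcoder crtc timings but gets full normal timings, the user-visible mode is derived from the full numbers, and pipe_mode is finally divided back down for the joiner case.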
2483 */ 2484 drm_mode_copy(pipe_mode, adjusted_mode); 2485 2486 /* Expand MSO per-segment transcoder timings to full */ 2487 intel_splitter_adjust_timings(crtc_state, pipe_mode); 2488 2489 /* 2490 * We want the full numbers in adjusted_mode normal timings; the 2491 * adjusted_mode crtc timings are left with the raw transcoder 2492 * timings. 2493 */ 2494 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode); 2495 2496 /* Populate the "user" mode with full numbers */ 2497 drm_mode_copy(mode, pipe_mode); 2498 intel_mode_from_crtc_timings(mode, mode); 2499 mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) * 2500 intel_crtc_num_joined_pipes(crtc_state); 2501 mode->vdisplay = drm_rect_height(&crtc_state->pipe_src); 2502 2503 /* Derive per-pipe timings in case joiner is used */ 2504 intel_joiner_adjust_timings(crtc_state, pipe_mode); 2505 intel_mode_from_crtc_timings(pipe_mode, pipe_mode); 2506 2507 intel_crtc_compute_pixel_rate(crtc_state); 2508 } 2509 2510 void intel_encoder_get_config(struct intel_encoder *encoder, 2511 struct intel_crtc_state *crtc_state) 2512 { 2513 encoder->get_config(encoder, crtc_state); 2514 2515 intel_crtc_readout_derived_state(crtc_state); 2516 } 2517 2518 static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state) 2519 { 2520 int num_pipes = intel_crtc_num_joined_pipes(crtc_state); 2521 int width, height; 2522 2523 if (num_pipes == 1) 2524 return; 2525 2526 width = drm_rect_width(&crtc_state->pipe_src); 2527 height = drm_rect_height(&crtc_state->pipe_src); 2528 2529 drm_rect_init(&crtc_state->pipe_src, 0, 0, 2530 width / num_pipes, height); 2531 } 2532 2533 static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state) 2534 { 2535 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2536 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2537 2538 intel_joiner_compute_pipe_src(crtc_state); 2539 2540 /* 2541 * Pipe horizontal size must be even in: 2542 * - DVO ganged mode 2543 * - LVDS dual channel mode 2544 * - Double wide pipe 2545 */ 2546 if (drm_rect_width(&crtc_state->pipe_src) & 1) { 2547 if (crtc_state->double_wide) { 2548 drm_dbg_kms(&i915->drm, 2549 "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n", 2550 crtc->base.base.id, crtc->base.name); 2551 return -EINVAL; 2552 } 2553 2554 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 2555 intel_is_dual_link_lvds(i915)) { 2556 drm_dbg_kms(&i915->drm, 2557 "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n", 2558 crtc->base.base.id, crtc->base.name); 2559 return -EINVAL; 2560 } 2561 } 2562 2563 return 0; 2564 } 2565 2566 static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) 2567 { 2568 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2569 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2570 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2571 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; 2572 int clock_limit = i915->display.cdclk.max_dotclk_freq; 2573 2574 /* 2575 * Start with the adjusted_mode crtc timings, which 2576 * have been filled with the transcoder timings.
2577 */ 2578 drm_mode_copy(pipe_mode, adjusted_mode); 2579 2580 /* Expand MSO per-segment transcoder timings to full */ 2581 intel_splitter_adjust_timings(crtc_state, pipe_mode); 2582 2583 /* Derive per-pipe timings in case joiner is used */ 2584 intel_joiner_adjust_timings(crtc_state, pipe_mode); 2585 intel_mode_from_crtc_timings(pipe_mode, pipe_mode); 2586 2587 if (DISPLAY_VER(i915) < 4) { 2588 clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10; 2589 2590 /* 2591 * Enable double wide mode when the dot clock 2592 * is > 90% of the (display) core speed. 2593 */ 2594 if (intel_crtc_supports_double_wide(crtc) && 2595 pipe_mode->crtc_clock > clock_limit) { 2596 clock_limit = i915->display.cdclk.max_dotclk_freq; 2597 crtc_state->double_wide = true; 2598 } 2599 } 2600 2601 if (pipe_mode->crtc_clock > clock_limit) { 2602 drm_dbg_kms(&i915->drm, 2603 "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 2604 crtc->base.base.id, crtc->base.name, 2605 pipe_mode->crtc_clock, clock_limit, 2606 str_yes_no(crtc_state->double_wide)); 2607 return -EINVAL; 2608 } 2609 2610 return 0; 2611 } 2612 2613 static bool intel_crtc_needs_wa_14015401596(struct intel_crtc_state *crtc_state) 2614 { 2615 struct intel_display *display = to_intel_display(crtc_state); 2616 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2617 2618 return intel_vrr_possible(crtc_state) && crtc_state->has_psr && 2619 adjusted_mode->crtc_vblank_start == adjusted_mode->crtc_vdisplay && 2620 IS_DISPLAY_VER(display, 13, 14); 2621 } 2622 2623 static int intel_crtc_compute_config(struct intel_atomic_state *state, 2624 struct intel_crtc *crtc) 2625 { 2626 struct intel_crtc_state *crtc_state = 2627 intel_atomic_get_new_crtc_state(state, crtc); 2628 struct drm_display_mode *adjusted_mode = 2629 &crtc_state->hw.adjusted_mode; 2630 int ret; 2631 2632 /* Wa_14015401596 */ 2633 if (intel_crtc_needs_wa_14015401596(crtc_state)) 2634 adjusted_mode->crtc_vblank_start += 1; 2635 2636 ret = intel_dpll_crtc_compute_clock(state, crtc); 2637 if (ret) 2638 return ret; 2639 2640 ret = intel_crtc_compute_pipe_src(crtc_state); 2641 if (ret) 2642 return ret; 2643 2644 ret = intel_crtc_compute_pipe_mode(crtc_state); 2645 if (ret) 2646 return ret; 2647 2648 intel_crtc_compute_pixel_rate(crtc_state); 2649 2650 if (crtc_state->has_pch_encoder) 2651 return ilk_fdi_compute_config(crtc, crtc_state); 2652 2653 return 0; 2654 } 2655 2656 static void 2657 intel_reduce_m_n_ratio(u32 *num, u32 *den) 2658 { 2659 while (*num > DATA_LINK_M_N_MASK || 2660 *den > DATA_LINK_M_N_MASK) { 2661 *num >>= 1; 2662 *den >>= 1; 2663 } 2664 } 2665 2666 static void compute_m_n(u32 *ret_m, u32 *ret_n, 2667 u32 m, u32 n, u32 constant_n) 2668 { 2669 if (constant_n) 2670 *ret_n = constant_n; 2671 else 2672 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 2673 2674 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); 2675 intel_reduce_m_n_ratio(ret_m, ret_n); 2676 } 2677 2678 void 2679 intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes, 2680 int pixel_clock, int link_clock, 2681 int bw_overhead, 2682 struct intel_link_m_n *m_n) 2683 { 2684 u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock); 2685 u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16, 2686 bw_overhead); 2687 u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes); 2688 2689 /* 2690 * Windows/BIOS uses fixed M/N values always. Follow suit. 
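* Illustrative example: a 2.7 GHz 8b/10b link has a 270000 kHz symbol clock (assuming intel_dp_link_symbol_clock() returns the link clock unchanged for 8b/10b rates), so a 148500 kHz pixel clock yields link_n = 0x80000 and link_m = 148500 * 0x80000 / 270000 = 0x46666, i.e. a 0.55 ratio well within the 24-bit register fields.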
2691 * 2692 * Also several DP dongles in particular seem to be fussy 2693 * about too large link M/N values. Presumably the 20bit 2694 * value used by Windows/BIOS is acceptable to everyone. 2695 */ 2696 m_n->tu = 64; 2697 compute_m_n(&m_n->data_m, &m_n->data_n, 2698 data_m, data_n, 2699 0x8000000); 2700 2701 compute_m_n(&m_n->link_m, &m_n->link_n, 2702 pixel_clock, link_symbol_clock, 2703 0x80000); 2704 } 2705 2706 void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) 2707 { 2708 /* 2709 * There may be no VBT; and if the BIOS enabled SSC we can 2710 * just keep using it to avoid unnecessary flicker. Whereas if the 2711 * BIOS isn't using it, don't assume it will work even if the VBT 2712 * indicates as much. 2713 */ 2714 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 2715 bool bios_lvds_use_ssc = intel_de_read(dev_priv, 2716 PCH_DREF_CONTROL) & 2717 DREF_SSC1_ENABLE; 2718 2719 if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) { 2720 drm_dbg_kms(&dev_priv->drm, 2721 "SSC %s by BIOS, overriding VBT which says %s\n", 2722 str_enabled_disabled(bios_lvds_use_ssc), 2723 str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc)); 2724 dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc; 2725 } 2726 } 2727 } 2728 2729 void intel_zero_m_n(struct intel_link_m_n *m_n) 2730 { 2731 /* corresponds to 0 register value */ 2732 memset(m_n, 0, sizeof(*m_n)); 2733 m_n->tu = 1; 2734 } 2735 2736 void intel_set_m_n(struct drm_i915_private *i915, 2737 const struct intel_link_m_n *m_n, 2738 i915_reg_t data_m_reg, i915_reg_t data_n_reg, 2739 i915_reg_t link_m_reg, i915_reg_t link_n_reg) 2740 { 2741 intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m); 2742 intel_de_write(i915, data_n_reg, m_n->data_n); 2743 intel_de_write(i915, link_m_reg, m_n->link_m); 2744 /* 2745 * On BDW+ writing LINK_N arms the double buffered update 2746 * of all the M/N registers, so it must be written last. 
2747 */ 2748 intel_de_write(i915, link_n_reg, m_n->link_n); 2749 } 2750 2751 bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv, 2752 enum transcoder transcoder) 2753 { 2754 if (IS_HASWELL(dev_priv)) 2755 return transcoder == TRANSCODER_EDP; 2756 2757 return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv); 2758 } 2759 2760 void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc, 2761 enum transcoder transcoder, 2762 const struct intel_link_m_n *m_n) 2763 { 2764 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2765 enum pipe pipe = crtc->pipe; 2766 2767 if (DISPLAY_VER(dev_priv) >= 5) 2768 intel_set_m_n(dev_priv, m_n, 2769 PIPE_DATA_M1(dev_priv, transcoder), 2770 PIPE_DATA_N1(dev_priv, transcoder), 2771 PIPE_LINK_M1(dev_priv, transcoder), 2772 PIPE_LINK_N1(dev_priv, transcoder)); 2773 else 2774 intel_set_m_n(dev_priv, m_n, 2775 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), 2776 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); 2777 } 2778 2779 void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc, 2780 enum transcoder transcoder, 2781 const struct intel_link_m_n *m_n) 2782 { 2783 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2784 2785 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) 2786 return; 2787 2788 intel_set_m_n(dev_priv, m_n, 2789 PIPE_DATA_M2(dev_priv, transcoder), 2790 PIPE_DATA_N2(dev_priv, transcoder), 2791 PIPE_LINK_M2(dev_priv, transcoder), 2792 PIPE_LINK_N2(dev_priv, transcoder)); 2793 } 2794 2795 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state) 2796 { 2797 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2798 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2799 enum pipe pipe = crtc->pipe; 2800 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2801 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2802 u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end; 2803 int vsyncshift = 0; 2804 2805 /* We need to be careful not to change the adjusted mode, as otherwise 2806 * the hw state checker will get angry at the mismatch. */ 2807 crtc_vdisplay = adjusted_mode->crtc_vdisplay; 2808 crtc_vtotal = adjusted_mode->crtc_vtotal; 2809 crtc_vblank_start = adjusted_mode->crtc_vblank_start; 2810 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 2811 2812 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 2813 /* the chip adds 2 halflines automatically */ 2814 crtc_vtotal -= 1; 2815 crtc_vblank_end -= 1; 2816 2817 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 2818 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 2819 else 2820 vsyncshift = adjusted_mode->crtc_hsync_start - 2821 adjusted_mode->crtc_htotal / 2; 2822 if (vsyncshift < 0) 2823 vsyncshift += adjusted_mode->crtc_htotal; 2824 } 2825 2826 /* 2827 * VBLANK_START no longer works on ADL+, instead we must use 2828 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start. 2829 */ 2830 if (DISPLAY_VER(dev_priv) >= 13) { 2831 intel_de_write(dev_priv, 2832 TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder), 2833 crtc_vblank_start - crtc_vdisplay); 2834 2835 /* 2836 * VBLANK_START not used by hw, just clear it 2837 * to make it stand out in register dumps.
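* E.g. vdisplay == 2160 with vblank_start == 2164 programs the register with 4, and the readout side (intel_get_transcoder_timings()) reconstructs vblank_start as vdisplay plus this value.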
2838 */ 2839 crtc_vblank_start = 1; 2840 } 2841 2842 if (DISPLAY_VER(dev_priv) >= 4) 2843 intel_de_write(dev_priv, 2844 TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder), 2845 vsyncshift); 2846 2847 intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder), 2848 HACTIVE(adjusted_mode->crtc_hdisplay - 1) | 2849 HTOTAL(adjusted_mode->crtc_htotal - 1)); 2850 intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder), 2851 HBLANK_START(adjusted_mode->crtc_hblank_start - 1) | 2852 HBLANK_END(adjusted_mode->crtc_hblank_end - 1)); 2853 intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder), 2854 HSYNC_START(adjusted_mode->crtc_hsync_start - 1) | 2855 HSYNC_END(adjusted_mode->crtc_hsync_end - 1)); 2856 2857 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder), 2858 VACTIVE(crtc_vdisplay - 1) | 2859 VTOTAL(crtc_vtotal - 1)); 2860 intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder), 2861 VBLANK_START(crtc_vblank_start - 1) | 2862 VBLANK_END(crtc_vblank_end - 1)); 2863 intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder), 2864 VSYNC_START(adjusted_mode->crtc_vsync_start - 1) | 2865 VSYNC_END(adjusted_mode->crtc_vsync_end - 1)); 2866 2867 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 2868 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 2869 * documented on the DDI_FUNC_CTL register description, EDP Input Select 2870 * bits. */ 2871 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 2872 (pipe == PIPE_B || pipe == PIPE_C)) 2873 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, pipe), 2874 VACTIVE(crtc_vdisplay - 1) | 2875 VTOTAL(crtc_vtotal - 1)); 2876 } 2877 2878 static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state) 2879 { 2880 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2881 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2882 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2883 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2884 u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end; 2885 2886 crtc_vdisplay = adjusted_mode->crtc_vdisplay; 2887 crtc_vtotal = adjusted_mode->crtc_vtotal; 2888 crtc_vblank_start = adjusted_mode->crtc_vblank_start; 2889 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 2890 2891 drm_WARN_ON(&dev_priv->drm, adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE); 2892 2893 /* 2894 * The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode. 2895 * But let's write it anyway to keep the state checker happy. 2896 */ 2897 intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder), 2898 VBLANK_START(crtc_vblank_start - 1) | 2899 VBLANK_END(crtc_vblank_end - 1)); 2900 /* 2901 * The double buffer latch point for TRANS_VTOTAL 2902 * is the transcoder's undelayed vblank. 2903 */ 2904 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder), 2905 VACTIVE(crtc_vdisplay - 1) | 2906 VTOTAL(crtc_vtotal - 1)); 2907 } 2908 2909 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 2910 { 2911 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2912 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2913 int width = drm_rect_width(&crtc_state->pipe_src); 2914 int height = drm_rect_height(&crtc_state->pipe_src); 2915 enum pipe pipe = crtc->pipe; 2916 2917 /* pipesrc controls the size that is scaled from, which should 2918 * always be the user's requested size. 
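* The register fields hold size minus one, e.g. a 1920x1080 source is written as PIPESRC_WIDTH(1919) | PIPESRC_HEIGHT(1079).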
2919 */ 2920 intel_de_write(dev_priv, PIPESRC(dev_priv, pipe), 2921 PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1)); 2922 } 2923 2924 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) 2925 { 2926 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 2927 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2928 2929 if (DISPLAY_VER(dev_priv) == 2) 2930 return false; 2931 2932 if (DISPLAY_VER(dev_priv) >= 9 || 2933 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2934 return intel_de_read(dev_priv, 2935 TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW; 2936 else 2937 return intel_de_read(dev_priv, 2938 TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK; 2939 } 2940 2941 static void intel_get_transcoder_timings(struct intel_crtc *crtc, 2942 struct intel_crtc_state *pipe_config) 2943 { 2944 struct drm_device *dev = crtc->base.dev; 2945 struct drm_i915_private *dev_priv = to_i915(dev); 2946 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 2947 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2948 u32 tmp; 2949 2950 tmp = intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder)); 2951 adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1; 2952 adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1; 2953 2954 if (!transcoder_is_dsi(cpu_transcoder)) { 2955 tmp = intel_de_read(dev_priv, 2956 TRANS_HBLANK(dev_priv, cpu_transcoder)); 2957 adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1; 2958 adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1; 2959 } 2960 2961 tmp = intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder)); 2962 adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1; 2963 adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1; 2964 2965 tmp = intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder)); 2966 adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1; 2967 adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1; 2968 2969 /* FIXME TGL+ DSI transcoders have this! 
*/ 2970 if (!transcoder_is_dsi(cpu_transcoder)) { 2971 tmp = intel_de_read(dev_priv, 2972 TRANS_VBLANK(dev_priv, cpu_transcoder)); 2973 adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1; 2974 adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1; 2975 } 2976 tmp = intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder)); 2977 adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1; 2978 adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1; 2979 2980 if (intel_pipe_is_interlaced(pipe_config)) { 2981 adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE; 2982 adjusted_mode->crtc_vtotal += 1; 2983 adjusted_mode->crtc_vblank_end += 1; 2984 } 2985 2986 if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder)) 2987 adjusted_mode->crtc_vblank_start = 2988 adjusted_mode->crtc_vdisplay + 2989 intel_de_read(dev_priv, 2990 TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder)); 2991 } 2992 2993 static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state) 2994 { 2995 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2996 int num_pipes = intel_crtc_num_joined_pipes(crtc_state); 2997 enum pipe primary_pipe, pipe = crtc->pipe; 2998 int width; 2999 3000 if (num_pipes == 1) 3001 return; 3002 3003 primary_pipe = joiner_primary_pipe(crtc_state); 3004 width = drm_rect_width(&crtc_state->pipe_src); 3005 3006 drm_rect_translate_to(&crtc_state->pipe_src, 3007 (pipe - primary_pipe) * width, 0); 3008 } 3009 3010 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 3011 struct intel_crtc_state *pipe_config) 3012 { 3013 struct drm_device *dev = crtc->base.dev; 3014 struct drm_i915_private *dev_priv = to_i915(dev); 3015 u32 tmp; 3016 3017 tmp = intel_de_read(dev_priv, PIPESRC(dev_priv, crtc->pipe)); 3018 3019 drm_rect_init(&pipe_config->pipe_src, 0, 0, 3020 REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1, 3021 REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1); 3022 3023 intel_joiner_adjust_pipe_src(pipe_config); 3024 } 3025 3026 void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 3027 { 3028 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3029 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3030 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3031 u32 val = 0; 3032 3033 /* 3034 * - We keep both pipes enabled on 830 3035 * - During modeset the pipe is still disabled and must remain so 3036 * - During fastset the pipe is already enabled and must remain so 3037 */ 3038 if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state)) 3039 val |= TRANSCONF_ENABLE; 3040 3041 if (crtc_state->double_wide) 3042 val |= TRANSCONF_DOUBLE_WIDE; 3043 3044 /* only g4x and later have fancy bpc/dither controls */ 3045 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 3046 IS_CHERRYVIEW(dev_priv)) { 3047 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 3048 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 3049 val |= TRANSCONF_DITHER_EN | 3050 TRANSCONF_DITHER_TYPE_SP; 3051 3052 switch (crtc_state->pipe_bpp) { 3053 default: 3054 /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ 3055 MISSING_CASE(crtc_state->pipe_bpp); 3056 fallthrough; 3057 case 18: 3058 val |= TRANSCONF_BPC_6; 3059 break; 3060 case 24: 3061 val |= TRANSCONF_BPC_8; 3062 break; 3063 case 30: 3064 val |= TRANSCONF_BPC_10; 3065 break; 3066 } 3067 } 3068 3069 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 3070 if (DISPLAY_VER(dev_priv) < 4 || 3071 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3072 val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION; 3073 else 3074 val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT; 3075 } else { 3076 val |= TRANSCONF_INTERLACE_PROGRESSIVE; 3077 } 3078 3079 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 3080 crtc_state->limited_color_range) 3081 val |= TRANSCONF_COLOR_RANGE_SELECT; 3082 3083 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 3084 3085 if (crtc_state->wgc_enable) 3086 val |= TRANSCONF_WGC_ENABLE; 3087 3088 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3089 3090 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val); 3091 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder)); 3092 } 3093 3094 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 3095 { 3096 if (IS_I830(dev_priv)) 3097 return false; 3098 3099 return DISPLAY_VER(dev_priv) >= 4 || 3100 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 3101 } 3102 3103 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) 3104 { 3105 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3106 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3107 enum pipe pipe; 3108 u32 tmp; 3109 3110 if (!i9xx_has_pfit(dev_priv)) 3111 return; 3112 3113 tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)); 3114 if (!(tmp & PFIT_ENABLE)) 3115 return; 3116 3117 /* Check whether the pfit is attached to our pipe. */ 3118 if (DISPLAY_VER(dev_priv) >= 4) 3119 pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp); 3120 else 3121 pipe = PIPE_B; 3122 3123 if (pipe != crtc->pipe) 3124 return; 3125 3126 crtc_state->gmch_pfit.control = tmp; 3127 crtc_state->gmch_pfit.pgm_ratios = 3128 intel_de_read(dev_priv, PFIT_PGM_RATIOS(dev_priv)); 3129 } 3130 3131 static enum intel_output_format 3132 bdw_get_pipe_misc_output_format(struct intel_crtc *crtc) 3133 { 3134 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3135 u32 tmp; 3136 3137 tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); 3138 3139 if (tmp & PIPE_MISC_YUV420_ENABLE) { 3140 /* 3141 * We support 4:2:0 in full blend mode only. 3142 * For xe3_lpd+ this is implied in YUV420 Enable bit. 3143 * Ensure the same for prior platforms in YUV420 Mode bit. 
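* This mirrors bdw_set_pipe_misc(), which sets YUV420 Enable alone on xe3_lpd+ and Enable plus full blend on earlier platforms.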
3144 */ 3145 if (DISPLAY_VER(dev_priv) < 30) 3146 drm_WARN_ON(&dev_priv->drm, 3147 (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0); 3148 3149 return INTEL_OUTPUT_FORMAT_YCBCR420; 3150 } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) { 3151 return INTEL_OUTPUT_FORMAT_YCBCR444; 3152 } else { 3153 return INTEL_OUTPUT_FORMAT_RGB; 3154 } 3155 } 3156 3157 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 3158 struct intel_crtc_state *pipe_config) 3159 { 3160 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3161 enum intel_display_power_domain power_domain; 3162 intel_wakeref_t wakeref; 3163 u32 tmp; 3164 bool ret; 3165 3166 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 3167 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 3168 if (!wakeref) 3169 return false; 3170 3171 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3172 pipe_config->sink_format = pipe_config->output_format; 3173 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 3174 pipe_config->shared_dpll = NULL; 3175 3176 ret = false; 3177 3178 tmp = intel_de_read(dev_priv, 3179 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 3180 if (!(tmp & TRANSCONF_ENABLE)) 3181 goto out; 3182 3183 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 3184 IS_CHERRYVIEW(dev_priv)) { 3185 switch (tmp & TRANSCONF_BPC_MASK) { 3186 case TRANSCONF_BPC_6: 3187 pipe_config->pipe_bpp = 18; 3188 break; 3189 case TRANSCONF_BPC_8: 3190 pipe_config->pipe_bpp = 24; 3191 break; 3192 case TRANSCONF_BPC_10: 3193 pipe_config->pipe_bpp = 30; 3194 break; 3195 default: 3196 MISSING_CASE(tmp); 3197 break; 3198 } 3199 } 3200 3201 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 3202 (tmp & TRANSCONF_COLOR_RANGE_SELECT)) 3203 pipe_config->limited_color_range = true; 3204 3205 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp); 3206 3207 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 3208 3209 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 3210 (tmp & TRANSCONF_WGC_ENABLE)) 3211 pipe_config->wgc_enable = true; 3212 3213 intel_color_get_config(pipe_config); 3214 3215 if (HAS_DOUBLE_WIDE(dev_priv)) 3216 pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE; 3217 3218 intel_get_transcoder_timings(crtc, pipe_config); 3219 intel_get_pipe_src_size(crtc, pipe_config); 3220 3221 i9xx_get_pfit_config(pipe_config); 3222 3223 i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state); 3224 3225 if (DISPLAY_VER(dev_priv) >= 4) { 3226 tmp = pipe_config->dpll_hw_state.i9xx.dpll_md; 3227 pipe_config->pixel_multiplier = 3228 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 3229 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 3230 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 3231 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 3232 tmp = pipe_config->dpll_hw_state.i9xx.dpll; 3233 pipe_config->pixel_multiplier = 3234 ((tmp & SDVO_MULTIPLIER_MASK) 3235 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 3236 } else { 3237 /* Note that on i915G/GM the pixel multiplier is in the sdvo 3238 * port and will be fixed up in the encoder->get_config 3239 * function. */ 3240 pipe_config->pixel_multiplier = 1; 3241 } 3242 3243 if (IS_CHERRYVIEW(dev_priv)) 3244 chv_crtc_clock_get(pipe_config); 3245 else if (IS_VALLEYVIEW(dev_priv)) 3246 vlv_crtc_clock_get(pipe_config); 3247 else 3248 i9xx_crtc_clock_get(pipe_config); 3249 3250 /* 3251 * Normally the dotclock is filled in by the encoder .get_config() 3252 * but in case the pipe is enabled w/o any ports we need a sane 3253 * default. 
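* E.g. an SDVO output driving a 65000 kHz mode with pixel_multiplier == 2 has port_clock == 130000, from which we recover crtc_clock = 130000 / 2.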
3254 */ 3255 pipe_config->hw.adjusted_mode.crtc_clock = 3256 pipe_config->port_clock / pipe_config->pixel_multiplier; 3257 3258 ret = true; 3259 3260 out: 3261 intel_display_power_put(dev_priv, power_domain, wakeref); 3262 3263 return ret; 3264 } 3265 3266 void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) 3267 { 3268 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3269 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3270 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3271 u32 val = 0; 3272 3273 /* 3274 * - During modeset the pipe is still disabled and must remain so 3275 * - During fastset the pipe is already enabled and must remain so 3276 */ 3277 if (!intel_crtc_needs_modeset(crtc_state)) 3278 val |= TRANSCONF_ENABLE; 3279 3280 switch (crtc_state->pipe_bpp) { 3281 default: 3282 /* Case prevented by intel_choose_pipe_bpp_dither. */ 3283 MISSING_CASE(crtc_state->pipe_bpp); 3284 fallthrough; 3285 case 18: 3286 val |= TRANSCONF_BPC_6; 3287 break; 3288 case 24: 3289 val |= TRANSCONF_BPC_8; 3290 break; 3291 case 30: 3292 val |= TRANSCONF_BPC_10; 3293 break; 3294 case 36: 3295 val |= TRANSCONF_BPC_12; 3296 break; 3297 } 3298 3299 if (crtc_state->dither) 3300 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3301 3302 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3303 val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3304 else 3305 val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3306 3307 /* 3308 * This would end up with an odd purple hue over 3309 * the entire display. Make sure we don't do it. 3310 */ 3311 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && 3312 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 3313 3314 if (crtc_state->limited_color_range && 3315 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3316 val |= TRANSCONF_COLOR_RANGE_SELECT; 3317 3318 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3319 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709; 3320 3321 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 3322 3323 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3324 val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); 3325 3326 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val); 3327 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder)); 3328 } 3329 3330 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) 3331 { 3332 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3333 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3334 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3335 u32 val = 0; 3336 3337 /* 3338 * - During modeset the pipe is still disabled and must remain so 3339 * - During fastset the pipe is already enabled and must remain so 3340 */ 3341 if (!intel_crtc_needs_modeset(crtc_state)) 3342 val |= TRANSCONF_ENABLE; 3343 3344 if (IS_HASWELL(dev_priv) && crtc_state->dither) 3345 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3346 3347 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3348 val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3349 else 3350 val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3351 3352 if (IS_HASWELL(dev_priv) && 3353 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3354 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW; 3355 3356 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val); 3357 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder)); 3358 } 3359 3360 static void bdw_set_pipe_misc(struct intel_dsb 
*dsb, 3361 const struct intel_crtc_state *crtc_state) 3362 { 3363 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3364 struct intel_display *display = to_intel_display(crtc->base.dev); 3365 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3366 u32 val = 0; 3367 3368 switch (crtc_state->pipe_bpp) { 3369 case 18: 3370 val |= PIPE_MISC_BPC_6; 3371 break; 3372 case 24: 3373 val |= PIPE_MISC_BPC_8; 3374 break; 3375 case 30: 3376 val |= PIPE_MISC_BPC_10; 3377 break; 3378 case 36: 3379 /* Port output 12BPC defined for ADLP+ */ 3380 if (DISPLAY_VER(dev_priv) >= 13) 3381 val |= PIPE_MISC_BPC_12_ADLP; 3382 break; 3383 default: 3384 MISSING_CASE(crtc_state->pipe_bpp); 3385 break; 3386 } 3387 3388 if (crtc_state->dither) 3389 val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP; 3390 3391 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 3392 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 3393 val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV; 3394 3395 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 3396 val |= DISPLAY_VER(display) >= 30 ? PIPE_MISC_YUV420_ENABLE : 3397 PIPE_MISC_YUV420_ENABLE | PIPE_MISC_YUV420_MODE_FULL_BLEND; 3398 3399 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state)) 3400 val |= PIPE_MISC_HDR_MODE_PRECISION; 3401 3402 if (DISPLAY_VER(dev_priv) >= 12) 3403 val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC; 3404 3405 /* allow PSR with sprite enabled */ 3406 if (IS_BROADWELL(dev_priv)) 3407 val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE; 3408 3409 intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val); 3410 } 3411 3412 int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) 3413 { 3414 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3415 u32 tmp; 3416 3417 tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); 3418 3419 switch (tmp & PIPE_MISC_BPC_MASK) { 3420 case PIPE_MISC_BPC_6: 3421 return 18; 3422 case PIPE_MISC_BPC_8: 3423 return 24; 3424 case PIPE_MISC_BPC_10: 3425 return 30; 3426 /* 3427 * PORT OUTPUT 12 BPC defined for ADLP+. 3428 * 3429 * TODO: 3430 * For previous platforms with DSI interface, bits 5:7 3431 * are used for storing pipe_bpp irrespective of dithering. 3432 * Since the value of 12 BPC is not defined for these bits 3433 * on older platforms, need to find a workaround for 12 BPC 3434 * MIPI DSI HW readout. 3435 */ 3436 case PIPE_MISC_BPC_12_ADLP: 3437 if (DISPLAY_VER(dev_priv) >= 13) 3438 return 36; 3439 fallthrough; 3440 default: 3441 MISSING_CASE(tmp); 3442 return 0; 3443 } 3444 } 3445 3446 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) 3447 { 3448 /* 3449 * Account for spread spectrum to avoid 3450 * oversubscribing the link. Max center spread 3451 * is 2.5%; use 5% for safety's sake. 
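* E.g. a 148500 kHz mode at 24 bpp gives bps = 148500 * 24 * 21 / 20 = 3742200; with the usual 2.7 GHz FDI link (link_bw == 270000) each lane carries 270000 * 8 = 2160000 kbps, so two lanes are needed.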
3452 	 */
3453 	u32 bps = target_clock * bpp * 21 / 20;
3454 	return DIV_ROUND_UP(bps, link_bw * 8);
3455 }
3456
3457 void intel_get_m_n(struct drm_i915_private *i915,
3458 		   struct intel_link_m_n *m_n,
3459 		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
3460 		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
3461 {
3462 	m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
3463 	m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
3464 	m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
3465 	m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
3466 	m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
3467 }
3468
3469 void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
3470 				    enum transcoder transcoder,
3471 				    struct intel_link_m_n *m_n)
3472 {
3473 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3474 	enum pipe pipe = crtc->pipe;
3475
3476 	if (DISPLAY_VER(dev_priv) >= 5)
3477 		intel_get_m_n(dev_priv, m_n,
3478 			      PIPE_DATA_M1(dev_priv, transcoder),
3479 			      PIPE_DATA_N1(dev_priv, transcoder),
3480 			      PIPE_LINK_M1(dev_priv, transcoder),
3481 			      PIPE_LINK_N1(dev_priv, transcoder));
3482 	else
3483 		intel_get_m_n(dev_priv, m_n,
3484 			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
3485 			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
3486 }
3487
3488 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
3489 				    enum transcoder transcoder,
3490 				    struct intel_link_m_n *m_n)
3491 {
3492 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3493
3494 	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
3495 		return;
3496
3497 	intel_get_m_n(dev_priv, m_n,
3498 		      PIPE_DATA_M2(dev_priv, transcoder),
3499 		      PIPE_DATA_N2(dev_priv, transcoder),
3500 		      PIPE_LINK_M2(dev_priv, transcoder),
3501 		      PIPE_LINK_N2(dev_priv, transcoder));
3502 }
3503
3504 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
3505 {
3506 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3507 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3508 	u32 ctl, pos, size;
3509 	enum pipe pipe;
3510
3511 	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
3512 	if ((ctl & PF_ENABLE) == 0)
3513 		return;
3514
3515 	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
3516 		pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl);
3517 	else
3518 		pipe = crtc->pipe;
3519
3520 	crtc_state->pch_pfit.enabled = true;
3521
3522 	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
3523 	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
3524
3525 	drm_rect_init(&crtc_state->pch_pfit.dst,
3526 		      REG_FIELD_GET(PF_WIN_XPOS_MASK, pos),
3527 		      REG_FIELD_GET(PF_WIN_YPOS_MASK, pos),
3528 		      REG_FIELD_GET(PF_WIN_XSIZE_MASK, size),
3529 		      REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));
3530
3531 	/*
3532 	 * We currently do not free assignments of panel fitters on
3533 	 * ivb/hsw (since we don't use the higher upscaling modes which
3534 	 * differentiate them) so just WARN about this case for now.
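	 *
	 * (Editor's note: if the WARN below fires, the panel fitter read
	 * out for this pipe is assigned to a different pipe, a state the
	 * driver's fixed pipe<->fitter mapping cannot represent.)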
3535 */ 3536 drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe); 3537 } 3538 3539 static bool ilk_get_pipe_config(struct intel_crtc *crtc, 3540 struct intel_crtc_state *pipe_config) 3541 { 3542 struct drm_device *dev = crtc->base.dev; 3543 struct drm_i915_private *dev_priv = to_i915(dev); 3544 enum intel_display_power_domain power_domain; 3545 intel_wakeref_t wakeref; 3546 u32 tmp; 3547 bool ret; 3548 3549 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 3550 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 3551 if (!wakeref) 3552 return false; 3553 3554 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 3555 pipe_config->shared_dpll = NULL; 3556 3557 ret = false; 3558 tmp = intel_de_read(dev_priv, 3559 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 3560 if (!(tmp & TRANSCONF_ENABLE)) 3561 goto out; 3562 3563 switch (tmp & TRANSCONF_BPC_MASK) { 3564 case TRANSCONF_BPC_6: 3565 pipe_config->pipe_bpp = 18; 3566 break; 3567 case TRANSCONF_BPC_8: 3568 pipe_config->pipe_bpp = 24; 3569 break; 3570 case TRANSCONF_BPC_10: 3571 pipe_config->pipe_bpp = 30; 3572 break; 3573 case TRANSCONF_BPC_12: 3574 pipe_config->pipe_bpp = 36; 3575 break; 3576 default: 3577 break; 3578 } 3579 3580 if (tmp & TRANSCONF_COLOR_RANGE_SELECT) 3581 pipe_config->limited_color_range = true; 3582 3583 switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) { 3584 case TRANSCONF_OUTPUT_COLORSPACE_YUV601: 3585 case TRANSCONF_OUTPUT_COLORSPACE_YUV709: 3586 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 3587 break; 3588 default: 3589 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3590 break; 3591 } 3592 3593 pipe_config->sink_format = pipe_config->output_format; 3594 3595 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp); 3596 3597 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 3598 3599 pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp); 3600 3601 intel_color_get_config(pipe_config); 3602 3603 pipe_config->pixel_multiplier = 1; 3604 3605 ilk_pch_get_config(pipe_config); 3606 3607 intel_get_transcoder_timings(crtc, pipe_config); 3608 intel_get_pipe_src_size(crtc, pipe_config); 3609 3610 ilk_get_pfit_config(pipe_config); 3611 3612 ret = true; 3613 3614 out: 3615 intel_display_power_put(dev_priv, power_domain, wakeref); 3616 3617 return ret; 3618 } 3619 3620 static u8 joiner_pipes(struct drm_i915_private *i915) 3621 { 3622 u8 pipes; 3623 3624 if (DISPLAY_VER(i915) >= 12) 3625 pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); 3626 else if (DISPLAY_VER(i915) >= 11) 3627 pipes = BIT(PIPE_B) | BIT(PIPE_C); 3628 else 3629 pipes = 0; 3630 3631 return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask; 3632 } 3633 3634 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, 3635 enum transcoder cpu_transcoder) 3636 { 3637 enum intel_display_power_domain power_domain; 3638 intel_wakeref_t wakeref; 3639 u32 tmp = 0; 3640 3641 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3642 3643 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) 3644 tmp = intel_de_read(dev_priv, 3645 TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)); 3646 3647 return tmp & TRANS_DDI_FUNC_ENABLE; 3648 } 3649 3650 static void enabled_uncompressed_joiner_pipes(struct intel_display *display, 3651 u8 *primary_pipes, u8 *secondary_pipes) 3652 { 3653 struct drm_i915_private *i915 = to_i915(display->drm); 3654 struct intel_crtc *crtc; 3655 3656 *primary_pipes = 0; 3657 *secondary_pipes = 
0; 3658 3659 if (!HAS_UNCOMPRESSED_JOINER(display)) 3660 return; 3661 3662 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, 3663 joiner_pipes(i915)) { 3664 enum intel_display_power_domain power_domain; 3665 enum pipe pipe = crtc->pipe; 3666 intel_wakeref_t wakeref; 3667 3668 power_domain = POWER_DOMAIN_PIPE(pipe); 3669 with_intel_display_power_if_enabled(i915, power_domain, wakeref) { 3670 u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe)); 3671 3672 if (tmp & UNCOMPRESSED_JOINER_PRIMARY) 3673 *primary_pipes |= BIT(pipe); 3674 if (tmp & UNCOMPRESSED_JOINER_SECONDARY) 3675 *secondary_pipes |= BIT(pipe); 3676 } 3677 } 3678 } 3679 3680 static void enabled_bigjoiner_pipes(struct intel_display *display, 3681 u8 *primary_pipes, u8 *secondary_pipes) 3682 { 3683 struct drm_i915_private *i915 = to_i915(display->drm); 3684 struct intel_crtc *crtc; 3685 3686 *primary_pipes = 0; 3687 *secondary_pipes = 0; 3688 3689 if (!HAS_BIGJOINER(display)) 3690 return; 3691 3692 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, 3693 joiner_pipes(i915)) { 3694 enum intel_display_power_domain power_domain; 3695 enum pipe pipe = crtc->pipe; 3696 intel_wakeref_t wakeref; 3697 3698 power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe); 3699 with_intel_display_power_if_enabled(i915, power_domain, wakeref) { 3700 u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe)); 3701 3702 if (!(tmp & BIG_JOINER_ENABLE)) 3703 continue; 3704 3705 if (tmp & PRIMARY_BIG_JOINER_ENABLE) 3706 *primary_pipes |= BIT(pipe); 3707 else 3708 *secondary_pipes |= BIT(pipe); 3709 } 3710 } 3711 } 3712 3713 static u8 expected_secondary_pipes(u8 primary_pipes, int num_pipes) 3714 { 3715 u8 secondary_pipes = 0; 3716 3717 for (int i = 1; i < num_pipes; i++) 3718 secondary_pipes |= primary_pipes << i; 3719 3720 return secondary_pipes; 3721 } 3722 3723 static u8 expected_uncompressed_joiner_secondary_pipes(u8 uncompjoiner_primary_pipes) 3724 { 3725 return expected_secondary_pipes(uncompjoiner_primary_pipes, 2); 3726 } 3727 3728 static u8 expected_bigjoiner_secondary_pipes(u8 bigjoiner_primary_pipes) 3729 { 3730 return expected_secondary_pipes(bigjoiner_primary_pipes, 2); 3731 } 3732 3733 static u8 get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes) 3734 { 3735 primary_pipes &= GENMASK(pipe, 0); 3736 3737 return primary_pipes ? 
BIT(fls(primary_pipes) - 1) : 0;
3738 }
3739
3740 static u8 expected_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes)
3741 {
3742 	return expected_secondary_pipes(ultrajoiner_primary_pipes, 4);
3743 }
3744
3745 static u8 fixup_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes,
3746 					    u8 ultrajoiner_secondary_pipes)
3747 {
3748 	return ultrajoiner_secondary_pipes | ultrajoiner_primary_pipes << 3;
3749 }
3750
3751 static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
3752 				      u8 *primary_pipes, u8 *secondary_pipes)
3753 {
3754 	struct intel_display *display = &i915->display;
3755 	struct intel_crtc *crtc;
3756
3757 	*primary_pipes = 0;
3758 	*secondary_pipes = 0;
3759
3760 	if (!HAS_ULTRAJOINER(display))
3761 		return;
3762
3763 	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
3764 					 joiner_pipes(i915)) {
3765 		enum intel_display_power_domain power_domain;
3766 		enum pipe pipe = crtc->pipe;
3767 		intel_wakeref_t wakeref;
3768
3769 		power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
3770 		with_intel_display_power_if_enabled(i915, power_domain, wakeref) {
3771 			u32 tmp = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
3772
3773 			if (!(tmp & ULTRA_JOINER_ENABLE))
3774 				continue;
3775
3776 			if (tmp & PRIMARY_ULTRA_JOINER_ENABLE)
3777 				*primary_pipes |= BIT(pipe);
3778 			else
3779 				*secondary_pipes |= BIT(pipe);
3780 		}
3781 	}
3782 }
3783
3784 static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
3785 				 enum pipe pipe,
3786 				 u8 *primary_pipe, u8 *secondary_pipes)
3787 {
3788 	struct intel_display *display = to_intel_display(&dev_priv->drm);
3789 	u8 primary_ultrajoiner_pipes;
3790 	u8 primary_uncompressed_joiner_pipes, primary_bigjoiner_pipes;
3791 	u8 secondary_ultrajoiner_pipes;
3792 	u8 secondary_uncompressed_joiner_pipes, secondary_bigjoiner_pipes;
3793 	u8 ultrajoiner_pipes;
3794 	u8 uncompressed_joiner_pipes, bigjoiner_pipes;
3795
3796 	enabled_ultrajoiner_pipes(dev_priv, &primary_ultrajoiner_pipes,
3797 				  &secondary_ultrajoiner_pipes);
3798 	/*
3799 	 * For some strange reason the last pipe in the set of four
3800 	 * shouldn't have the ultrajoiner enable bit set in hardware.
3801 	 * Set the bit anyway to make life easier.
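	 *
	 * Illustrative example (editor's note): with pipe A as the
	 * ultrajoiner primary (primary_pipes == 0x1), hardware reports
	 * only pipes B and C (0x6) as secondaries even though B, C and D
	 * are all in the set; the fixup below ORs in
	 * primary_pipes << 3 (0x8) to yield the expected 0xE.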
3802 */ 3803 drm_WARN_ON(&dev_priv->drm, 3804 expected_secondary_pipes(primary_ultrajoiner_pipes, 3) != 3805 secondary_ultrajoiner_pipes); 3806 secondary_ultrajoiner_pipes = 3807 fixup_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes, 3808 secondary_ultrajoiner_pipes); 3809 3810 drm_WARN_ON(&dev_priv->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0); 3811 3812 enabled_uncompressed_joiner_pipes(display, &primary_uncompressed_joiner_pipes, 3813 &secondary_uncompressed_joiner_pipes); 3814 3815 drm_WARN_ON(display->drm, 3816 (primary_uncompressed_joiner_pipes & secondary_uncompressed_joiner_pipes) != 0); 3817 3818 enabled_bigjoiner_pipes(display, &primary_bigjoiner_pipes, 3819 &secondary_bigjoiner_pipes); 3820 3821 drm_WARN_ON(display->drm, 3822 (primary_bigjoiner_pipes & secondary_bigjoiner_pipes) != 0); 3823 3824 ultrajoiner_pipes = primary_ultrajoiner_pipes | secondary_ultrajoiner_pipes; 3825 uncompressed_joiner_pipes = primary_uncompressed_joiner_pipes | 3826 secondary_uncompressed_joiner_pipes; 3827 bigjoiner_pipes = primary_bigjoiner_pipes | secondary_bigjoiner_pipes; 3828 3829 drm_WARN(display->drm, (ultrajoiner_pipes & bigjoiner_pipes) != ultrajoiner_pipes, 3830 "Ultrajoiner pipes(%#x) should be bigjoiner pipes(%#x)\n", 3831 ultrajoiner_pipes, bigjoiner_pipes); 3832 3833 drm_WARN(display->drm, secondary_ultrajoiner_pipes != 3834 expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes), 3835 "Wrong secondary ultrajoiner pipes(expected %#x, current %#x)\n", 3836 expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes), 3837 secondary_ultrajoiner_pipes); 3838 3839 drm_WARN(display->drm, (uncompressed_joiner_pipes & bigjoiner_pipes) != 0, 3840 "Uncompressed joiner pipes(%#x) and bigjoiner pipes(%#x) can't intersect\n", 3841 uncompressed_joiner_pipes, bigjoiner_pipes); 3842 3843 drm_WARN(display->drm, secondary_bigjoiner_pipes != 3844 expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes), 3845 "Wrong secondary bigjoiner pipes(expected %#x, current %#x)\n", 3846 expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes), 3847 secondary_bigjoiner_pipes); 3848 3849 drm_WARN(display->drm, secondary_uncompressed_joiner_pipes != 3850 expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes), 3851 "Wrong secondary uncompressed joiner pipes(expected %#x, current %#x)\n", 3852 expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes), 3853 secondary_uncompressed_joiner_pipes); 3854 3855 *primary_pipe = 0; 3856 *secondary_pipes = 0; 3857 3858 if (ultrajoiner_pipes & BIT(pipe)) { 3859 *primary_pipe = get_joiner_primary_pipe(pipe, primary_ultrajoiner_pipes); 3860 *secondary_pipes = secondary_ultrajoiner_pipes & 3861 expected_ultrajoiner_secondary_pipes(*primary_pipe); 3862 3863 drm_WARN(display->drm, 3864 expected_ultrajoiner_secondary_pipes(*primary_pipe) != 3865 *secondary_pipes, 3866 "Wrong ultrajoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", 3867 *primary_pipe, 3868 expected_ultrajoiner_secondary_pipes(*primary_pipe), 3869 *secondary_pipes); 3870 return; 3871 } 3872 3873 if (uncompressed_joiner_pipes & BIT(pipe)) { 3874 *primary_pipe = get_joiner_primary_pipe(pipe, primary_uncompressed_joiner_pipes); 3875 *secondary_pipes = secondary_uncompressed_joiner_pipes & 3876 expected_uncompressed_joiner_secondary_pipes(*primary_pipe); 3877 3878 drm_WARN(display->drm, 3879 expected_uncompressed_joiner_secondary_pipes(*primary_pipe) != 3880 *secondary_pipes, 3881 "Wrong uncompressed joiner 
secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", 3882 *primary_pipe, 3883 expected_uncompressed_joiner_secondary_pipes(*primary_pipe), 3884 *secondary_pipes); 3885 return; 3886 } 3887 3888 if (bigjoiner_pipes & BIT(pipe)) { 3889 *primary_pipe = get_joiner_primary_pipe(pipe, primary_bigjoiner_pipes); 3890 *secondary_pipes = secondary_bigjoiner_pipes & 3891 expected_bigjoiner_secondary_pipes(*primary_pipe); 3892 3893 drm_WARN(display->drm, 3894 expected_bigjoiner_secondary_pipes(*primary_pipe) != 3895 *secondary_pipes, 3896 "Wrong bigjoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", 3897 *primary_pipe, 3898 expected_bigjoiner_secondary_pipes(*primary_pipe), 3899 *secondary_pipes); 3900 return; 3901 } 3902 } 3903 3904 static u8 hsw_panel_transcoders(struct drm_i915_private *i915) 3905 { 3906 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP); 3907 3908 if (DISPLAY_VER(i915) >= 11) 3909 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 3910 3911 return panel_transcoder_mask; 3912 } 3913 3914 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) 3915 { 3916 struct drm_device *dev = crtc->base.dev; 3917 struct drm_i915_private *dev_priv = to_i915(dev); 3918 u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv); 3919 enum transcoder cpu_transcoder; 3920 u8 primary_pipe, secondary_pipes; 3921 u8 enabled_transcoders = 0; 3922 3923 /* 3924 * XXX: Do intel_display_power_get_if_enabled before reading this (for 3925 * consistency and less surprising code; it's in always on power). 3926 */ 3927 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, 3928 panel_transcoder_mask) { 3929 enum intel_display_power_domain power_domain; 3930 intel_wakeref_t wakeref; 3931 enum pipe trans_pipe; 3932 u32 tmp = 0; 3933 3934 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3935 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) 3936 tmp = intel_de_read(dev_priv, 3937 TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)); 3938 3939 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 3940 continue; 3941 3942 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 3943 default: 3944 drm_WARN(dev, 1, 3945 "unknown pipe linked to transcoder %s\n", 3946 transcoder_name(cpu_transcoder)); 3947 fallthrough; 3948 case TRANS_DDI_EDP_INPUT_A_ONOFF: 3949 case TRANS_DDI_EDP_INPUT_A_ON: 3950 trans_pipe = PIPE_A; 3951 break; 3952 case TRANS_DDI_EDP_INPUT_B_ONOFF: 3953 trans_pipe = PIPE_B; 3954 break; 3955 case TRANS_DDI_EDP_INPUT_C_ONOFF: 3956 trans_pipe = PIPE_C; 3957 break; 3958 case TRANS_DDI_EDP_INPUT_D_ONOFF: 3959 trans_pipe = PIPE_D; 3960 break; 3961 } 3962 3963 if (trans_pipe == crtc->pipe) 3964 enabled_transcoders |= BIT(cpu_transcoder); 3965 } 3966 3967 /* single pipe or joiner primary */ 3968 cpu_transcoder = (enum transcoder) crtc->pipe; 3969 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 3970 enabled_transcoders |= BIT(cpu_transcoder); 3971 3972 /* joiner secondary -> consider the primary pipe's transcoder as well */ 3973 enabled_joiner_pipes(dev_priv, crtc->pipe, &primary_pipe, &secondary_pipes); 3974 if (secondary_pipes & BIT(crtc->pipe)) { 3975 cpu_transcoder = (enum transcoder)ffs(primary_pipe) - 1; 3976 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 3977 enabled_transcoders |= BIT(cpu_transcoder); 3978 } 3979 3980 return enabled_transcoders; 3981 } 3982 3983 static bool has_edp_transcoders(u8 enabled_transcoders) 3984 { 3985 return enabled_transcoders & BIT(TRANSCODER_EDP); 3986 } 3987 3988 static bool has_dsi_transcoders(u8 
enabled_transcoders) 3989 { 3990 return enabled_transcoders & (BIT(TRANSCODER_DSI_0) | 3991 BIT(TRANSCODER_DSI_1)); 3992 } 3993 3994 static bool has_pipe_transcoders(u8 enabled_transcoders) 3995 { 3996 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) | 3997 BIT(TRANSCODER_DSI_0) | 3998 BIT(TRANSCODER_DSI_1)); 3999 } 4000 4001 static void assert_enabled_transcoders(struct drm_i915_private *i915, 4002 u8 enabled_transcoders) 4003 { 4004 /* Only one type of transcoder please */ 4005 drm_WARN_ON(&i915->drm, 4006 has_edp_transcoders(enabled_transcoders) + 4007 has_dsi_transcoders(enabled_transcoders) + 4008 has_pipe_transcoders(enabled_transcoders) > 1); 4009 4010 /* Only DSI transcoders can be ganged */ 4011 drm_WARN_ON(&i915->drm, 4012 !has_dsi_transcoders(enabled_transcoders) && 4013 !is_power_of_2(enabled_transcoders)); 4014 } 4015 4016 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 4017 struct intel_crtc_state *pipe_config, 4018 struct intel_display_power_domain_set *power_domain_set) 4019 { 4020 struct drm_device *dev = crtc->base.dev; 4021 struct drm_i915_private *dev_priv = to_i915(dev); 4022 unsigned long enabled_transcoders; 4023 u32 tmp; 4024 4025 enabled_transcoders = hsw_enabled_transcoders(crtc); 4026 if (!enabled_transcoders) 4027 return false; 4028 4029 assert_enabled_transcoders(dev_priv, enabled_transcoders); 4030 4031 /* 4032 * With the exception of DSI we should only ever have 4033 * a single enabled transcoder. With DSI let's just 4034 * pick the first one. 4035 */ 4036 pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1; 4037 4038 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 4039 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 4040 return false; 4041 4042 if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) { 4043 tmp = intel_de_read(dev_priv, 4044 TRANS_DDI_FUNC_CTL(dev_priv, pipe_config->cpu_transcoder)); 4045 4046 if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF) 4047 pipe_config->pch_pfit.force_thru = true; 4048 } 4049 4050 tmp = intel_de_read(dev_priv, 4051 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 4052 4053 return tmp & TRANSCONF_ENABLE; 4054 } 4055 4056 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 4057 struct intel_crtc_state *pipe_config, 4058 struct intel_display_power_domain_set *power_domain_set) 4059 { 4060 struct intel_display *display = to_intel_display(crtc); 4061 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4062 enum transcoder cpu_transcoder; 4063 enum port port; 4064 u32 tmp; 4065 4066 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 4067 if (port == PORT_A) 4068 cpu_transcoder = TRANSCODER_DSI_A; 4069 else 4070 cpu_transcoder = TRANSCODER_DSI_C; 4071 4072 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 4073 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) 4074 continue; 4075 4076 /* 4077 * The PLL needs to be enabled with a valid divider 4078 * configuration, otherwise accessing DSI registers will hang 4079 * the machine. See BSpec North Display Engine 4080 * registers/MIPI[BXT]. We can break out here early, since we 4081 * need the same DSI PLL to be enabled for both DSI ports. 
4082 */ 4083 if (!bxt_dsi_pll_is_enabled(dev_priv)) 4084 break; 4085 4086 /* XXX: this works for video mode only */ 4087 tmp = intel_de_read(display, BXT_MIPI_PORT_CTRL(port)); 4088 if (!(tmp & DPI_ENABLE)) 4089 continue; 4090 4091 tmp = intel_de_read(display, MIPI_CTRL(display, port)); 4092 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 4093 continue; 4094 4095 pipe_config->cpu_transcoder = cpu_transcoder; 4096 break; 4097 } 4098 4099 return transcoder_is_dsi(pipe_config->cpu_transcoder); 4100 } 4101 4102 static void intel_joiner_get_config(struct intel_crtc_state *crtc_state) 4103 { 4104 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4105 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4106 u8 primary_pipe, secondary_pipes; 4107 enum pipe pipe = crtc->pipe; 4108 4109 enabled_joiner_pipes(i915, pipe, &primary_pipe, &secondary_pipes); 4110 4111 if (((primary_pipe | secondary_pipes) & BIT(pipe)) == 0) 4112 return; 4113 4114 crtc_state->joiner_pipes = primary_pipe | secondary_pipes; 4115 } 4116 4117 static bool hsw_get_pipe_config(struct intel_crtc *crtc, 4118 struct intel_crtc_state *pipe_config) 4119 { 4120 struct intel_display *display = to_intel_display(crtc); 4121 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4122 bool active; 4123 u32 tmp; 4124 4125 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains, 4126 POWER_DOMAIN_PIPE(crtc->pipe))) 4127 return false; 4128 4129 pipe_config->shared_dpll = NULL; 4130 4131 active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains); 4132 4133 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 4134 bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) { 4135 drm_WARN_ON(&dev_priv->drm, active); 4136 active = true; 4137 } 4138 4139 if (!active) 4140 goto out; 4141 4142 intel_joiner_get_config(pipe_config); 4143 intel_dsc_get_config(pipe_config); 4144 4145 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 4146 DISPLAY_VER(dev_priv) >= 11) 4147 intel_get_transcoder_timings(crtc, pipe_config); 4148 4149 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) 4150 intel_vrr_get_config(pipe_config); 4151 4152 intel_get_pipe_src_size(crtc, pipe_config); 4153 4154 if (IS_HASWELL(dev_priv)) { 4155 u32 tmp = intel_de_read(dev_priv, 4156 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 4157 4158 if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW) 4159 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 4160 else 4161 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 4162 } else { 4163 pipe_config->output_format = 4164 bdw_get_pipe_misc_output_format(crtc); 4165 } 4166 4167 pipe_config->sink_format = pipe_config->output_format; 4168 4169 intel_color_get_config(pipe_config); 4170 4171 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe)); 4172 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp); 4173 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 4174 pipe_config->ips_linetime = 4175 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); 4176 4177 if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains, 4178 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) { 4179 if (DISPLAY_VER(dev_priv) >= 9) 4180 skl_scaler_get_config(pipe_config); 4181 else 4182 ilk_get_pfit_config(pipe_config); 4183 } 4184 4185 hsw_ips_get_config(pipe_config); 4186 4187 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 4188 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 
4189 		pipe_config->pixel_multiplier =
4190 			intel_de_read(dev_priv,
4191 				      TRANS_MULT(dev_priv, pipe_config->cpu_transcoder)) + 1;
4192 	} else {
4193 		pipe_config->pixel_multiplier = 1;
4194 	}
4195
4196 	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
4197 		tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));
4198
4199 		pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
4200 	} else {
4201 		/* no idea if this is correct */
4202 		pipe_config->framestart_delay = 1;
4203 	}
4204
4205 out:
4206 	intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains);
4207
4208 	return active;
4209 }
4210
4211 bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
4212 {
4213 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4214 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4215
4216 	if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
4217 		return false;
4218
4219 	crtc_state->hw.active = true;
4220
4221 	intel_crtc_readout_derived_state(crtc_state);
4222
4223 	return true;
4224 }
4225
4226 int intel_dotclock_calculate(int link_freq,
4227 			     const struct intel_link_m_n *m_n)
4228 {
4229 	/*
4230 	 * The calculation for the data clock -> pixel clock is:
4231 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4232 	 * But we want to avoid losing precision if possible, so:
4233 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4234 	 *
4235 	 * and for link freq (10 kbit/s units) -> pixel clock it is:
4236 	 * link_symbol_clock = link_freq * 10 / link_symbol_size
4237 	 * pixel_clock = (m * link_symbol_clock) / n
4238 	 * or for more precision:
4239 	 * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size)
4240 	 */
4241
4242 	if (!m_n->link_n)
4243 		return 0;
4244
4245 	return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10),
4246 				m_n->link_n * intel_dp_link_symbol_size(link_freq));
4247 }
4248
4249 int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
4250 {
4251 	int dotclock;
4252
4253 	if (intel_crtc_has_dp_encoder(pipe_config))
4254 		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
4255 						    &pipe_config->dp_m_n);
4256 	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
4257 		dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
4258 					     pipe_config->pipe_bpp);
4259 	else
4260 		dotclock = pipe_config->port_clock;
4261
4262 	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
4263 	    !intel_crtc_has_dp_encoder(pipe_config))
4264 		dotclock *= 2;
4265
4266 	if (pipe_config->pixel_multiplier)
4267 		dotclock /= pipe_config->pixel_multiplier;
4268
4269 	return dotclock;
4270 }
4271
4272 /* Returns the currently programmed mode of the given encoder.
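 *
 * (Editor's note on intel_dotclock_calculate() above, with illustrative
 * m/n values: for an 8b/10b DP link at HBR2, link_freq = 540000 and the
 * link symbol size is 10, so link_m/link_n = 2750/10000 recovers
 * (2750 * 540000 * 10) / (10000 * 10) = 148500 kHz.)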
*/ 4273 struct drm_display_mode * 4274 intel_encoder_current_mode(struct intel_encoder *encoder) 4275 { 4276 struct intel_display *display = to_intel_display(encoder); 4277 struct intel_crtc_state *crtc_state; 4278 struct drm_display_mode *mode; 4279 struct intel_crtc *crtc; 4280 enum pipe pipe; 4281 4282 if (!encoder->get_hw_state(encoder, &pipe)) 4283 return NULL; 4284 4285 crtc = intel_crtc_for_pipe(display, pipe); 4286 4287 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 4288 if (!mode) 4289 return NULL; 4290 4291 crtc_state = intel_crtc_state_alloc(crtc); 4292 if (!crtc_state) { 4293 kfree(mode); 4294 return NULL; 4295 } 4296 4297 if (!intel_crtc_get_pipe_config(crtc_state)) { 4298 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); 4299 kfree(mode); 4300 return NULL; 4301 } 4302 4303 intel_encoder_get_config(encoder, crtc_state); 4304 4305 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode); 4306 4307 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); 4308 4309 return mode; 4310 } 4311 4312 static bool encoders_cloneable(const struct intel_encoder *a, 4313 const struct intel_encoder *b) 4314 { 4315 /* masks could be asymmetric, so check both ways */ 4316 return a == b || (a->cloneable & BIT(b->type) && 4317 b->cloneable & BIT(a->type)); 4318 } 4319 4320 static bool check_single_encoder_cloning(struct intel_atomic_state *state, 4321 struct intel_crtc *crtc, 4322 struct intel_encoder *encoder) 4323 { 4324 struct intel_encoder *source_encoder; 4325 struct drm_connector *connector; 4326 struct drm_connector_state *connector_state; 4327 int i; 4328 4329 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4330 if (connector_state->crtc != &crtc->base) 4331 continue; 4332 4333 source_encoder = 4334 to_intel_encoder(connector_state->best_encoder); 4335 if (!encoders_cloneable(encoder, source_encoder)) 4336 return false; 4337 } 4338 4339 return true; 4340 } 4341 4342 static int icl_add_linked_planes(struct intel_atomic_state *state) 4343 { 4344 struct intel_plane *plane, *linked; 4345 struct intel_plane_state *plane_state, *linked_plane_state; 4346 int i; 4347 4348 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4349 linked = plane_state->planar_linked_plane; 4350 4351 if (!linked) 4352 continue; 4353 4354 linked_plane_state = intel_atomic_get_plane_state(state, linked); 4355 if (IS_ERR(linked_plane_state)) 4356 return PTR_ERR(linked_plane_state); 4357 4358 drm_WARN_ON(state->base.dev, 4359 linked_plane_state->planar_linked_plane != plane); 4360 drm_WARN_ON(state->base.dev, 4361 linked_plane_state->planar_slave == plane_state->planar_slave); 4362 } 4363 4364 return 0; 4365 } 4366 4367 static int icl_check_nv12_planes(struct intel_atomic_state *state, 4368 struct intel_crtc *crtc) 4369 { 4370 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 4371 struct intel_crtc_state *crtc_state = 4372 intel_atomic_get_new_crtc_state(state, crtc); 4373 struct intel_plane *plane, *linked; 4374 struct intel_plane_state *plane_state; 4375 int i; 4376 4377 if (DISPLAY_VER(dev_priv) < 11) 4378 return 0; 4379 4380 /* 4381 * Destroy all old plane links and make the slave plane invisible 4382 * in the crtc_state->active_planes mask. 
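	 *
	 * (Editor's note: the loop below is the first of two passes; the
	 * second pass re-links each visible NV12 plane to a free Y plane
	 * and copies its parameters over.)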
4383 */ 4384 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4385 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) 4386 continue; 4387 4388 plane_state->planar_linked_plane = NULL; 4389 if (plane_state->planar_slave && !plane_state->uapi.visible) { 4390 crtc_state->enabled_planes &= ~BIT(plane->id); 4391 crtc_state->active_planes &= ~BIT(plane->id); 4392 crtc_state->update_planes |= BIT(plane->id); 4393 crtc_state->data_rate[plane->id] = 0; 4394 crtc_state->rel_data_rate[plane->id] = 0; 4395 } 4396 4397 plane_state->planar_slave = false; 4398 } 4399 4400 if (!crtc_state->nv12_planes) 4401 return 0; 4402 4403 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4404 struct intel_plane_state *linked_state = NULL; 4405 4406 if (plane->pipe != crtc->pipe || 4407 !(crtc_state->nv12_planes & BIT(plane->id))) 4408 continue; 4409 4410 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 4411 if (!icl_is_nv12_y_plane(dev_priv, linked->id)) 4412 continue; 4413 4414 if (crtc_state->active_planes & BIT(linked->id)) 4415 continue; 4416 4417 linked_state = intel_atomic_get_plane_state(state, linked); 4418 if (IS_ERR(linked_state)) 4419 return PTR_ERR(linked_state); 4420 4421 break; 4422 } 4423 4424 if (!linked_state) { 4425 drm_dbg_kms(&dev_priv->drm, 4426 "Need %d free Y planes for planar YUV\n", 4427 hweight8(crtc_state->nv12_planes)); 4428 4429 return -EINVAL; 4430 } 4431 4432 plane_state->planar_linked_plane = linked; 4433 4434 linked_state->planar_slave = true; 4435 linked_state->planar_linked_plane = plane; 4436 crtc_state->enabled_planes |= BIT(linked->id); 4437 crtc_state->active_planes |= BIT(linked->id); 4438 crtc_state->update_planes |= BIT(linked->id); 4439 crtc_state->data_rate[linked->id] = 4440 crtc_state->data_rate_y[plane->id]; 4441 crtc_state->rel_data_rate[linked->id] = 4442 crtc_state->rel_data_rate_y[plane->id]; 4443 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n", 4444 linked->base.name, plane->base.name); 4445 4446 /* Copy parameters to slave plane */ 4447 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; 4448 linked_state->color_ctl = plane_state->color_ctl; 4449 linked_state->view = plane_state->view; 4450 linked_state->decrypt = plane_state->decrypt; 4451 4452 intel_plane_copy_hw_state(linked_state, plane_state); 4453 linked_state->uapi.src = plane_state->uapi.src; 4454 linked_state->uapi.dst = plane_state->uapi.dst; 4455 4456 if (icl_is_hdr_plane(dev_priv, plane->id)) { 4457 if (linked->id == PLANE_7) 4458 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL; 4459 else if (linked->id == PLANE_6) 4460 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL; 4461 else if (linked->id == PLANE_5) 4462 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL; 4463 else if (linked->id == PLANE_4) 4464 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL; 4465 else 4466 MISSING_CASE(linked->id); 4467 } 4468 } 4469 4470 return 0; 4471 } 4472 4473 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) 4474 { 4475 const struct drm_display_mode *pipe_mode = 4476 &crtc_state->hw.pipe_mode; 4477 int linetime_wm; 4478 4479 if (!crtc_state->hw.enable) 4480 return 0; 4481 4482 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4483 pipe_mode->crtc_clock); 4484 4485 return min(linetime_wm, 0x1ff); 4486 } 4487 4488 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, 4489 const struct intel_cdclk_state *cdclk_state) 4490 { 4491 const struct drm_display_mode *pipe_mode = 4492 
&crtc_state->hw.pipe_mode; 4493 int linetime_wm; 4494 4495 if (!crtc_state->hw.enable) 4496 return 0; 4497 4498 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4499 cdclk_state->logical.cdclk); 4500 4501 return min(linetime_wm, 0x1ff); 4502 } 4503 4504 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) 4505 { 4506 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4507 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4508 const struct drm_display_mode *pipe_mode = 4509 &crtc_state->hw.pipe_mode; 4510 int linetime_wm; 4511 4512 if (!crtc_state->hw.enable) 4513 return 0; 4514 4515 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8, 4516 crtc_state->pixel_rate); 4517 4518 /* Display WA #1135: BXT:ALL GLK:ALL */ 4519 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 4520 skl_watermark_ipc_enabled(dev_priv)) 4521 linetime_wm /= 2; 4522 4523 return min(linetime_wm, 0x1ff); 4524 } 4525 4526 static int hsw_compute_linetime_wm(struct intel_atomic_state *state, 4527 struct intel_crtc *crtc) 4528 { 4529 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4530 struct intel_crtc_state *crtc_state = 4531 intel_atomic_get_new_crtc_state(state, crtc); 4532 const struct intel_cdclk_state *cdclk_state; 4533 4534 if (DISPLAY_VER(dev_priv) >= 9) 4535 crtc_state->linetime = skl_linetime_wm(crtc_state); 4536 else 4537 crtc_state->linetime = hsw_linetime_wm(crtc_state); 4538 4539 if (!hsw_crtc_supports_ips(crtc)) 4540 return 0; 4541 4542 cdclk_state = intel_atomic_get_cdclk_state(state); 4543 if (IS_ERR(cdclk_state)) 4544 return PTR_ERR(cdclk_state); 4545 4546 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state, 4547 cdclk_state); 4548 4549 return 0; 4550 } 4551 4552 static int intel_crtc_atomic_check(struct intel_atomic_state *state, 4553 struct intel_crtc *crtc) 4554 { 4555 struct intel_display *display = to_intel_display(crtc); 4556 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4557 struct intel_crtc_state *crtc_state = 4558 intel_atomic_get_new_crtc_state(state, crtc); 4559 int ret; 4560 4561 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) && 4562 intel_crtc_needs_modeset(crtc_state) && 4563 !crtc_state->hw.active) 4564 crtc_state->update_wm_post = true; 4565 4566 if (intel_crtc_needs_modeset(crtc_state)) { 4567 ret = intel_dpll_crtc_get_shared_dpll(state, crtc); 4568 if (ret) 4569 return ret; 4570 } 4571 4572 ret = intel_color_check(state, crtc); 4573 if (ret) 4574 return ret; 4575 4576 ret = intel_wm_compute(state, crtc); 4577 if (ret) { 4578 drm_dbg_kms(&dev_priv->drm, 4579 "[CRTC:%d:%s] watermarks are invalid\n", 4580 crtc->base.base.id, crtc->base.name); 4581 return ret; 4582 } 4583 4584 if (DISPLAY_VER(dev_priv) >= 9) { 4585 if (intel_crtc_needs_modeset(crtc_state) || 4586 intel_crtc_needs_fastset(crtc_state)) { 4587 ret = skl_update_scaler_crtc(crtc_state); 4588 if (ret) 4589 return ret; 4590 } 4591 4592 ret = intel_atomic_setup_scalers(state, crtc); 4593 if (ret) 4594 return ret; 4595 } 4596 4597 if (HAS_IPS(display)) { 4598 ret = hsw_ips_compute_config(state, crtc); 4599 if (ret) 4600 return ret; 4601 } 4602 4603 if (DISPLAY_VER(dev_priv) >= 9 || 4604 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 4605 ret = hsw_compute_linetime_wm(state, crtc); 4606 if (ret) 4607 return ret; 4608 4609 } 4610 4611 ret = intel_psr2_sel_fetch_update(state, crtc); 4612 if (ret) 4613 return ret; 4614 4615 return 0; 4616 } 4617 4618 static int 4619 compute_sink_pipe_bpp(const struct drm_connector_state 
*conn_state, 4620 struct intel_crtc_state *crtc_state) 4621 { 4622 struct drm_connector *connector = conn_state->connector; 4623 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 4624 const struct drm_display_info *info = &connector->display_info; 4625 int bpp; 4626 4627 switch (conn_state->max_bpc) { 4628 case 6 ... 7: 4629 bpp = 6 * 3; 4630 break; 4631 case 8 ... 9: 4632 bpp = 8 * 3; 4633 break; 4634 case 10 ... 11: 4635 bpp = 10 * 3; 4636 break; 4637 case 12 ... 16: 4638 bpp = 12 * 3; 4639 break; 4640 default: 4641 MISSING_CASE(conn_state->max_bpc); 4642 return -EINVAL; 4643 } 4644 4645 if (bpp < crtc_state->pipe_bpp) { 4646 drm_dbg_kms(&i915->drm, 4647 "[CONNECTOR:%d:%s] Limiting display bpp to %d " 4648 "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n", 4649 connector->base.id, connector->name, 4650 bpp, 3 * info->bpc, 4651 3 * conn_state->max_requested_bpc, 4652 crtc_state->pipe_bpp); 4653 4654 crtc_state->pipe_bpp = bpp; 4655 } 4656 4657 return 0; 4658 } 4659 4660 static int 4661 compute_baseline_pipe_bpp(struct intel_atomic_state *state, 4662 struct intel_crtc *crtc) 4663 { 4664 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4665 struct intel_crtc_state *crtc_state = 4666 intel_atomic_get_new_crtc_state(state, crtc); 4667 struct drm_connector *connector; 4668 struct drm_connector_state *connector_state; 4669 int bpp, i; 4670 4671 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 4672 IS_CHERRYVIEW(dev_priv))) 4673 bpp = 10*3; 4674 else if (DISPLAY_VER(dev_priv) >= 5) 4675 bpp = 12*3; 4676 else 4677 bpp = 8*3; 4678 4679 crtc_state->pipe_bpp = bpp; 4680 4681 /* Clamp display bpp to connector max bpp */ 4682 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4683 int ret; 4684 4685 if (connector_state->crtc != &crtc->base) 4686 continue; 4687 4688 ret = compute_sink_pipe_bpp(connector_state, crtc_state); 4689 if (ret) 4690 return ret; 4691 } 4692 4693 return 0; 4694 } 4695 4696 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 4697 { 4698 struct drm_device *dev = state->base.dev; 4699 struct drm_connector *connector; 4700 struct drm_connector_list_iter conn_iter; 4701 unsigned int used_ports = 0; 4702 unsigned int used_mst_ports = 0; 4703 bool ret = true; 4704 4705 /* 4706 * We're going to peek into connector->state, 4707 * hence connection_mutex must be held. 4708 */ 4709 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); 4710 4711 /* 4712 * Walk the connector list instead of the encoder 4713 * list to detect the problem on ddi platforms 4714 * where there's just one encoder per digital port. 
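	 *
	 * (Editor's note: e.g. an SST/HDMI connector and an MST connector
	 * can reference the same DDI port; the used_ports/used_mst_ports
	 * masks below catch both duplicate SST use of a port and an
	 * MST/SST mix on one port.)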
4715 */ 4716 drm_connector_list_iter_begin(dev, &conn_iter); 4717 drm_for_each_connector_iter(connector, &conn_iter) { 4718 struct drm_connector_state *connector_state; 4719 struct intel_encoder *encoder; 4720 4721 connector_state = 4722 drm_atomic_get_new_connector_state(&state->base, 4723 connector); 4724 if (!connector_state) 4725 connector_state = connector->state; 4726 4727 if (!connector_state->best_encoder) 4728 continue; 4729 4730 encoder = to_intel_encoder(connector_state->best_encoder); 4731 4732 drm_WARN_ON(dev, !connector_state->crtc); 4733 4734 switch (encoder->type) { 4735 case INTEL_OUTPUT_DDI: 4736 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) 4737 break; 4738 fallthrough; 4739 case INTEL_OUTPUT_DP: 4740 case INTEL_OUTPUT_HDMI: 4741 case INTEL_OUTPUT_EDP: 4742 /* the same port mustn't appear more than once */ 4743 if (used_ports & BIT(encoder->port)) 4744 ret = false; 4745 4746 used_ports |= BIT(encoder->port); 4747 break; 4748 case INTEL_OUTPUT_DP_MST: 4749 used_mst_ports |= 4750 1 << encoder->port; 4751 break; 4752 default: 4753 break; 4754 } 4755 } 4756 drm_connector_list_iter_end(&conn_iter); 4757 4758 /* can't mix MST and SST/HDMI on the same port */ 4759 if (used_ports & used_mst_ports) 4760 return false; 4761 4762 return ret; 4763 } 4764 4765 static void 4766 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, 4767 struct intel_crtc *crtc) 4768 { 4769 struct intel_crtc_state *crtc_state = 4770 intel_atomic_get_new_crtc_state(state, crtc); 4771 4772 WARN_ON(intel_crtc_is_joiner_secondary(crtc_state)); 4773 4774 drm_property_replace_blob(&crtc_state->hw.degamma_lut, 4775 crtc_state->uapi.degamma_lut); 4776 drm_property_replace_blob(&crtc_state->hw.gamma_lut, 4777 crtc_state->uapi.gamma_lut); 4778 drm_property_replace_blob(&crtc_state->hw.ctm, 4779 crtc_state->uapi.ctm); 4780 } 4781 4782 static void 4783 intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state, 4784 struct intel_crtc *crtc) 4785 { 4786 struct intel_crtc_state *crtc_state = 4787 intel_atomic_get_new_crtc_state(state, crtc); 4788 4789 WARN_ON(intel_crtc_is_joiner_secondary(crtc_state)); 4790 4791 crtc_state->hw.enable = crtc_state->uapi.enable; 4792 crtc_state->hw.active = crtc_state->uapi.active; 4793 drm_mode_copy(&crtc_state->hw.mode, 4794 &crtc_state->uapi.mode); 4795 drm_mode_copy(&crtc_state->hw.adjusted_mode, 4796 &crtc_state->uapi.adjusted_mode); 4797 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter; 4798 4799 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 4800 } 4801 4802 static void 4803 copy_joiner_crtc_state_nomodeset(struct intel_atomic_state *state, 4804 struct intel_crtc *secondary_crtc) 4805 { 4806 struct intel_crtc_state *secondary_crtc_state = 4807 intel_atomic_get_new_crtc_state(state, secondary_crtc); 4808 struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state); 4809 const struct intel_crtc_state *primary_crtc_state = 4810 intel_atomic_get_new_crtc_state(state, primary_crtc); 4811 4812 drm_property_replace_blob(&secondary_crtc_state->hw.degamma_lut, 4813 primary_crtc_state->hw.degamma_lut); 4814 drm_property_replace_blob(&secondary_crtc_state->hw.gamma_lut, 4815 primary_crtc_state->hw.gamma_lut); 4816 drm_property_replace_blob(&secondary_crtc_state->hw.ctm, 4817 primary_crtc_state->hw.ctm); 4818 4819 secondary_crtc_state->uapi.color_mgmt_changed = primary_crtc_state->uapi.color_mgmt_changed; 4820 } 4821 4822 static int 4823 copy_joiner_crtc_state_modeset(struct intel_atomic_state *state, 4824 struct 
intel_crtc *secondary_crtc)
4825 {
4826 	struct intel_crtc_state *secondary_crtc_state =
4827 		intel_atomic_get_new_crtc_state(state, secondary_crtc);
4828 	struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state);
4829 	const struct intel_crtc_state *primary_crtc_state =
4830 		intel_atomic_get_new_crtc_state(state, primary_crtc);
4831 	struct intel_crtc_state *saved_state;
4832
4833 	WARN_ON(primary_crtc_state->joiner_pipes !=
4834 		secondary_crtc_state->joiner_pipes);
4835
4836 	saved_state = kmemdup(primary_crtc_state, sizeof(*saved_state), GFP_KERNEL);
4837 	if (!saved_state)
4838 		return -ENOMEM;
4839
4840 	/* preserve some things from the slave's original crtc state */
4841 	saved_state->uapi = secondary_crtc_state->uapi;
4842 	saved_state->scaler_state = secondary_crtc_state->scaler_state;
4843 	saved_state->shared_dpll = secondary_crtc_state->shared_dpll;
4844 	saved_state->crc_enabled = secondary_crtc_state->crc_enabled;
4845
4846 	intel_crtc_free_hw_state(secondary_crtc_state);
4847 	if (secondary_crtc_state->dp_tunnel_ref.tunnel)
4848 		drm_dp_tunnel_ref_put(&secondary_crtc_state->dp_tunnel_ref);
4849 	memcpy(secondary_crtc_state, saved_state, sizeof(*secondary_crtc_state));
4850 	kfree(saved_state);
4851
4852 	/* Re-init hw state */
4853 	memset(&secondary_crtc_state->hw, 0, sizeof(secondary_crtc_state->hw));
4854 	secondary_crtc_state->hw.enable = primary_crtc_state->hw.enable;
4855 	secondary_crtc_state->hw.active = primary_crtc_state->hw.active;
4856 	drm_mode_copy(&secondary_crtc_state->hw.mode,
4857 		      &primary_crtc_state->hw.mode);
4858 	drm_mode_copy(&secondary_crtc_state->hw.pipe_mode,
4859 		      &primary_crtc_state->hw.pipe_mode);
4860 	drm_mode_copy(&secondary_crtc_state->hw.adjusted_mode,
4861 		      &primary_crtc_state->hw.adjusted_mode);
4862 	secondary_crtc_state->hw.scaling_filter = primary_crtc_state->hw.scaling_filter;
4863
4864 	if (primary_crtc_state->dp_tunnel_ref.tunnel)
4865 		drm_dp_tunnel_ref_get(primary_crtc_state->dp_tunnel_ref.tunnel,
4866 				      &secondary_crtc_state->dp_tunnel_ref);
4867
4868 	copy_joiner_crtc_state_nomodeset(state, secondary_crtc);
4869
4870 	secondary_crtc_state->uapi.mode_changed = primary_crtc_state->uapi.mode_changed;
4871 	secondary_crtc_state->uapi.connectors_changed = primary_crtc_state->uapi.connectors_changed;
4872 	secondary_crtc_state->uapi.active_changed = primary_crtc_state->uapi.active_changed;
4873
4874 	WARN_ON(primary_crtc_state->joiner_pipes !=
4875 		secondary_crtc_state->joiner_pipes);
4876
4877 	return 0;
4878 }
4879
4880 static int
4881 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
4882 				 struct intel_crtc *crtc)
4883 {
4884 	struct intel_crtc_state *crtc_state =
4885 		intel_atomic_get_new_crtc_state(state, crtc);
4886 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4887 	struct intel_crtc_state *saved_state;
4888
4889 	saved_state = intel_crtc_state_alloc(crtc);
4890 	if (!saved_state)
4891 		return -ENOMEM;
4892
4893 	/* free the old crtc_state->hw members */
4894 	intel_crtc_free_hw_state(crtc_state);
4895
4896 	intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state);
4897
4898 	/* FIXME: before the switch to atomic started, a new pipe_config was
4899 	 * kzalloc'd. Code that depends on any field being zero should be
4900 	 * fixed, so that the crtc_state can be safely duplicated. For now,
4901 	 * only fields that are known to not cause problems are preserved.
	 */
4902
4903 	saved_state->uapi = crtc_state->uapi;
4904 	saved_state->inherited = crtc_state->inherited;
4905 	saved_state->scaler_state = crtc_state->scaler_state;
4906 	saved_state->shared_dpll = crtc_state->shared_dpll;
4907 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
4908 	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
4909 	       sizeof(saved_state->icl_port_dplls));
4910 	saved_state->crc_enabled = crtc_state->crc_enabled;
4911 	if (IS_G4X(dev_priv) ||
4912 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4913 		saved_state->wm = crtc_state->wm;
4914
4915 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
4916 	kfree(saved_state);
4917
4918 	intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);
4919
4920 	return 0;
4921 }
4922
4923 static int
4924 intel_modeset_pipe_config(struct intel_atomic_state *state,
4925 			  struct intel_crtc *crtc,
4926 			  const struct intel_link_bw_limits *limits)
4927 {
4928 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4929 	struct intel_crtc_state *crtc_state =
4930 		intel_atomic_get_new_crtc_state(state, crtc);
4931 	struct drm_connector *connector;
4932 	struct drm_connector_state *connector_state;
4933 	int pipe_src_w, pipe_src_h;
4934 	int base_bpp, ret, i;
4935
4936 	crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;
4937
4938 	crtc_state->framestart_delay = 1;
4939
4940 	/*
4941 	 * Sanitize sync polarity flags based on requested ones. If neither
4942 	 * positive nor negative polarity is requested, treat this as meaning
4943 	 * negative polarity.
4944 	 */
4945 	if (!(crtc_state->hw.adjusted_mode.flags &
4946 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
4947 		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
4948
4949 	if (!(crtc_state->hw.adjusted_mode.flags &
4950 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
4951 		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
4952
4953 	ret = compute_baseline_pipe_bpp(state, crtc);
4954 	if (ret)
4955 		return ret;
4956
4957 	crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
4958 	crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
4959
4960 	if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) {
4961 		drm_dbg_kms(&i915->drm,
4962 			    "[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n",
4963 			    crtc->base.base.id, crtc->base.name,
4964 			    FXP_Q4_ARGS(crtc_state->max_link_bpp_x16));
4965 		crtc_state->bw_constrained = true;
4966 	}
4967
4968 	base_bpp = crtc_state->pipe_bpp;
4969
4970 	/*
4971 	 * Determine the real pipe dimensions. Note that stereo modes can
4972 	 * increase the actual pipe size due to the frame doubling and
4973 	 * insertion of additional space for blanks between the frames. This
4974 	 * is stored in the crtc timings. We use the requested mode to do this
4975 	 * computation to clearly distinguish it from the adjusted mode, which
4976 	 * can be changed by the connectors in the below retry loop.
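	 *
	 * (Editor's note, illustrative: a 1920x1080 frame-packed stereo
	 * mode is scanned out as a single 1920x2205 frame, i.e. two
	 * 1080-line eye views plus a 45-line vblank-sized gap.)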
4977 	 */
4978 	drm_mode_get_hv_timing(&crtc_state->hw.mode,
4979 			       &pipe_src_w, &pipe_src_h);
4980 	drm_rect_init(&crtc_state->pipe_src, 0, 0,
4981 		      pipe_src_w, pipe_src_h);
4982
4983 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4984 		struct intel_encoder *encoder =
4985 			to_intel_encoder(connector_state->best_encoder);
4986
4987 		if (connector_state->crtc != &crtc->base)
4988 			continue;
4989
4990 		if (!check_single_encoder_cloning(state, crtc, encoder)) {
4991 			drm_dbg_kms(&i915->drm,
4992 				    "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
4993 				    encoder->base.base.id, encoder->base.name);
4994 			return -EINVAL;
4995 		}
4996
4997 		/*
4998 		 * Determine output_types before calling the .compute_config()
4999 		 * hooks so that the hooks can use this information safely.
5000 		 */
5001 		if (encoder->compute_output_type)
5002 			crtc_state->output_types |=
5003 				BIT(encoder->compute_output_type(encoder, crtc_state,
5004 								 connector_state));
5005 		else
5006 			crtc_state->output_types |= BIT(encoder->type);
5007 	}
5008
5009 	/* Ensure the port clock defaults are reset when retrying. */
5010 	crtc_state->port_clock = 0;
5011 	crtc_state->pixel_multiplier = 1;
5012
5013 	/* Fill in default crtc timings, allow encoders to overwrite them. */
5014 	drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
5015 			      CRTC_STEREO_DOUBLE);
5016
5017 	/* Pass our mode to the connectors and the CRTC to give them a chance to
5018 	 * adjust it according to limitations or connector properties, and also
5019 	 * a chance to reject the mode entirely.
5020 	 */
5021 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5022 		struct intel_encoder *encoder =
5023 			to_intel_encoder(connector_state->best_encoder);
5024
5025 		if (connector_state->crtc != &crtc->base)
5026 			continue;
5027
5028 		ret = encoder->compute_config(encoder, crtc_state,
5029 					      connector_state);
5030 		if (ret == -EDEADLK)
5031 			return ret;
5032 		if (ret < 0) {
5033 			drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
5034 				    encoder->base.base.id, encoder->base.name, ret);
5035 			return ret;
5036 		}
5037 	}
5038
5039 	/* Set default port clock if not overwritten by the encoder. Needs to be
5040 	 * done afterwards in case the encoder adjusts the mode. */
5041 	if (!crtc_state->port_clock)
5042 		crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
5043 			* crtc_state->pixel_multiplier;
5044
5045 	ret = intel_crtc_compute_config(state, crtc);
5046 	if (ret == -EDEADLK)
5047 		return ret;
5048 	if (ret < 0) {
5049 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
5050 			    crtc->base.base.id, crtc->base.name, ret);
5051 		return ret;
5052 	}
5053
5054 	/* Dithering seems to not pass through bits correctly when it should, so
5055 	 * only enable it on 6bpc panels and when it's not a compliance
5056 	 * test requesting a 6bpc video pattern.
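	 *
	 * (Editor's note: e.g. a 6 bpc eDP panel driven with 8 bpc content
	 * gets pipe_bpp == 18 here and relies on the spatial dithering
	 * enabled below to mask banding.)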
5057 */ 5058 crtc_state->dither = (crtc_state->pipe_bpp == 6*3) && 5059 !crtc_state->dither_force_disable; 5060 drm_dbg_kms(&i915->drm, 5061 "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 5062 crtc->base.base.id, crtc->base.name, 5063 base_bpp, crtc_state->pipe_bpp, crtc_state->dither); 5064 5065 return 0; 5066 } 5067 5068 static int 5069 intel_modeset_pipe_config_late(struct intel_atomic_state *state, 5070 struct intel_crtc *crtc) 5071 { 5072 struct intel_crtc_state *crtc_state = 5073 intel_atomic_get_new_crtc_state(state, crtc); 5074 struct drm_connector_state *conn_state; 5075 struct drm_connector *connector; 5076 int i; 5077 5078 intel_vrr_compute_config_late(crtc_state); 5079 5080 for_each_new_connector_in_state(&state->base, connector, 5081 conn_state, i) { 5082 struct intel_encoder *encoder = 5083 to_intel_encoder(conn_state->best_encoder); 5084 int ret; 5085 5086 if (conn_state->crtc != &crtc->base || 5087 !encoder->compute_config_late) 5088 continue; 5089 5090 ret = encoder->compute_config_late(encoder, crtc_state, 5091 conn_state); 5092 if (ret) 5093 return ret; 5094 } 5095 5096 return 0; 5097 } 5098 5099 bool intel_fuzzy_clock_check(int clock1, int clock2) 5100 { 5101 int diff; 5102 5103 if (clock1 == clock2) 5104 return true; 5105 5106 if (!clock1 || !clock2) 5107 return false; 5108 5109 diff = abs(clock1 - clock2); 5110 5111 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 5112 return true; 5113 5114 return false; 5115 } 5116 5117 static bool 5118 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 5119 const struct intel_link_m_n *m2_n2) 5120 { 5121 return m_n->tu == m2_n2->tu && 5122 m_n->data_m == m2_n2->data_m && 5123 m_n->data_n == m2_n2->data_n && 5124 m_n->link_m == m2_n2->link_m && 5125 m_n->link_n == m2_n2->link_n; 5126 } 5127 5128 static bool 5129 intel_compare_infoframe(const union hdmi_infoframe *a, 5130 const union hdmi_infoframe *b) 5131 { 5132 return memcmp(a, b, sizeof(*a)) == 0; 5133 } 5134 5135 static bool 5136 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a, 5137 const struct drm_dp_vsc_sdp *b) 5138 { 5139 return a->pixelformat == b->pixelformat && 5140 a->colorimetry == b->colorimetry && 5141 a->bpc == b->bpc && 5142 a->dynamic_range == b->dynamic_range && 5143 a->content_type == b->content_type; 5144 } 5145 5146 static bool 5147 intel_compare_dp_as_sdp(const struct drm_dp_as_sdp *a, 5148 const struct drm_dp_as_sdp *b) 5149 { 5150 return a->vtotal == b->vtotal && 5151 a->target_rr == b->target_rr && 5152 a->duration_incr_ms == b->duration_incr_ms && 5153 a->duration_decr_ms == b->duration_decr_ms && 5154 a->mode == b->mode; 5155 } 5156 5157 static bool 5158 intel_compare_buffer(const u8 *a, const u8 *b, size_t len) 5159 { 5160 return memcmp(a, b, len) == 0; 5161 } 5162 5163 static void __printf(5, 6) 5164 pipe_config_mismatch(struct drm_printer *p, bool fastset, 5165 const struct intel_crtc *crtc, 5166 const char *name, const char *format, ...) 
5167 { 5168 struct va_format vaf; 5169 va_list args; 5170 5171 va_start(args, format); 5172 vaf.fmt = format; 5173 vaf.va = &args; 5174 5175 if (fastset) 5176 drm_printf(p, "[CRTC:%d:%s] fastset requirement not met in %s %pV\n", 5177 crtc->base.base.id, crtc->base.name, name, &vaf); 5178 else 5179 drm_printf(p, "[CRTC:%d:%s] mismatch in %s %pV\n", 5180 crtc->base.base.id, crtc->base.name, name, &vaf); 5181 5182 va_end(args); 5183 } 5184 5185 static void 5186 pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset, 5187 const struct intel_crtc *crtc, 5188 const char *name, 5189 const union hdmi_infoframe *a, 5190 const union hdmi_infoframe *b) 5191 { 5192 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 5193 const char *loglevel; 5194 5195 if (fastset) { 5196 if (!drm_debug_enabled(DRM_UT_KMS)) 5197 return; 5198 5199 loglevel = KERN_DEBUG; 5200 } else { 5201 loglevel = KERN_ERR; 5202 } 5203 5204 pipe_config_mismatch(p, fastset, crtc, name, "infoframe"); 5205 5206 drm_printf(p, "expected:\n"); 5207 hdmi_infoframe_log(loglevel, i915->drm.dev, a); 5208 drm_printf(p, "found:\n"); 5209 hdmi_infoframe_log(loglevel, i915->drm.dev, b); 5210 } 5211 5212 static void 5213 pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset, 5214 const struct intel_crtc *crtc, 5215 const char *name, 5216 const struct drm_dp_vsc_sdp *a, 5217 const struct drm_dp_vsc_sdp *b) 5218 { 5219 pipe_config_mismatch(p, fastset, crtc, name, "dp vsc sdp"); 5220 5221 drm_printf(p, "expected:\n"); 5222 drm_dp_vsc_sdp_log(p, a); 5223 drm_printf(p, "found:\n"); 5224 drm_dp_vsc_sdp_log(p, b); 5225 } 5226 5227 static void 5228 pipe_config_dp_as_sdp_mismatch(struct drm_printer *p, bool fastset, 5229 const struct intel_crtc *crtc, 5230 const char *name, 5231 const struct drm_dp_as_sdp *a, 5232 const struct drm_dp_as_sdp *b) 5233 { 5234 pipe_config_mismatch(p, fastset, crtc, name, "dp as sdp"); 5235 5236 drm_printf(p, "expected:\n"); 5237 drm_dp_as_sdp_log(p, a); 5238 drm_printf(p, "found:\n"); 5239 drm_dp_as_sdp_log(p, b); 5240 } 5241 5242 /* Returns the length up to and including the last differing byte */ 5243 static size_t 5244 memcmp_diff_len(const u8 *a, const u8 *b, size_t len) 5245 { 5246 int i; 5247 5248 for (i = len - 1; i >= 0; i--) { 5249 if (a[i] != b[i]) 5250 return i + 1; 5251 } 5252 5253 return 0; 5254 } 5255 5256 static void 5257 pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset, 5258 const struct intel_crtc *crtc, 5259 const char *name, 5260 const u8 *a, const u8 *b, size_t len) 5261 { 5262 pipe_config_mismatch(p, fastset, crtc, name, "buffer"); 5263 5264 /* only dump up to the last difference */ 5265 len = memcmp_diff_len(a, b, len); 5266 5267 drm_print_hex_dump(p, "expected: ", a, len); 5268 drm_print_hex_dump(p, "found: ", b, len); 5269 } 5270 5271 static void 5272 pipe_config_pll_mismatch(struct drm_printer *p, bool fastset, 5273 const struct intel_crtc *crtc, 5274 const char *name, 5275 const struct intel_dpll_hw_state *a, 5276 const struct intel_dpll_hw_state *b) 5277 { 5278 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 5279 5280 pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */ 5281 5282 drm_printf(p, "expected:\n"); 5283 intel_dpll_dump_hw_state(i915, p, a); 5284 drm_printf(p, "found:\n"); 5285 intel_dpll_dump_hw_state(i915, p, b); 5286 } 5287 5288 static void 5289 pipe_config_cx0pll_mismatch(struct drm_printer *p, bool fastset, 5290 const struct intel_crtc *crtc, 5291 const char *name, 5292 const struct 
intel_cx0pll_state *a, 5293 const struct intel_cx0pll_state *b) 5294 { 5295 struct intel_display *display = to_intel_display(crtc); 5296 const char *chipname = a->use_c10 ? "C10" : "C20"; 5297 5298 pipe_config_mismatch(p, fastset, crtc, name, chipname); 5299 5300 drm_printf(p, "expected:\n"); 5301 intel_cx0pll_dump_hw_state(display, a); 5302 drm_printf(p, "found:\n"); 5303 intel_cx0pll_dump_hw_state(display, b); 5304 } 5305 5306 bool 5307 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 5308 const struct intel_crtc_state *pipe_config, 5309 bool fastset) 5310 { 5311 struct intel_display *display = to_intel_display(current_config); 5312 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); 5313 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 5314 struct drm_printer p; 5315 bool ret = true; 5316 5317 if (fastset) 5318 p = drm_dbg_printer(&dev_priv->drm, DRM_UT_KMS, NULL); 5319 else 5320 p = drm_err_printer(&dev_priv->drm, NULL); 5321 5322 #define PIPE_CONF_CHECK_X(name) do { \ 5323 if (current_config->name != pipe_config->name) { \ 5324 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ 5325 __stringify(name) " is bool"); \ 5326 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5327 "(expected 0x%08x, found 0x%08x)", \ 5328 current_config->name, \ 5329 pipe_config->name); \ 5330 ret = false; \ 5331 } \ 5332 } while (0) 5333 5334 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \ 5335 if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \ 5336 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ 5337 __stringify(name) " is bool"); \ 5338 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5339 "(expected 0x%08x, found 0x%08x)", \ 5340 current_config->name & (mask), \ 5341 pipe_config->name & (mask)); \ 5342 ret = false; \ 5343 } \ 5344 } while (0) 5345 5346 #define PIPE_CONF_CHECK_I(name) do { \ 5347 if (current_config->name != pipe_config->name) { \ 5348 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ 5349 __stringify(name) " is bool"); \ 5350 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5351 "(expected %i, found %i)", \ 5352 current_config->name, \ 5353 pipe_config->name); \ 5354 ret = false; \ 5355 } \ 5356 } while (0) 5357 5358 #define PIPE_CONF_CHECK_LLI(name) do { \ 5359 if (current_config->name != pipe_config->name) { \ 5360 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5361 "(expected %lli, found %lli)", \ 5362 current_config->name, \ 5363 pipe_config->name); \ 5364 ret = false; \ 5365 } \ 5366 } while (0) 5367 5368 #define PIPE_CONF_CHECK_BOOL(name) do { \ 5369 if (current_config->name != pipe_config->name) { \ 5370 BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \ 5371 __stringify(name) " is not bool"); \ 5372 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5373 "(expected %s, found %s)", \ 5374 str_yes_no(current_config->name), \ 5375 str_yes_no(pipe_config->name)); \ 5376 ret = false; \ 5377 } \ 5378 } while (0) 5379 5380 #define PIPE_CONF_CHECK_P(name) do { \ 5381 if (current_config->name != pipe_config->name) { \ 5382 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5383 "(expected %p, found %p)", \ 5384 current_config->name, \ 5385 pipe_config->name); \ 5386 ret = false; \ 5387 } \ 5388 } while (0) 5389 5390 #define PIPE_CONF_CHECK_M_N(name) do { \ 5391 if (!intel_compare_link_m_n(&current_config->name, \ 5392 &pipe_config->name)) { \ 5393 pipe_config_mismatch(&p, fastset, crtc,
__stringify(name), \ 5394 "(expected tu %i data %i/%i link %i/%i, " \ 5395 "found tu %i, data %i/%i link %i/%i)", \ 5396 current_config->name.tu, \ 5397 current_config->name.data_m, \ 5398 current_config->name.data_n, \ 5399 current_config->name.link_m, \ 5400 current_config->name.link_n, \ 5401 pipe_config->name.tu, \ 5402 pipe_config->name.data_m, \ 5403 pipe_config->name.data_n, \ 5404 pipe_config->name.link_m, \ 5405 pipe_config->name.link_n); \ 5406 ret = false; \ 5407 } \ 5408 } while (0) 5409 5410 #define PIPE_CONF_CHECK_PLL(name) do { \ 5411 if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \ 5412 &pipe_config->name)) { \ 5413 pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \ 5414 &current_config->name, \ 5415 &pipe_config->name); \ 5416 ret = false; \ 5417 } \ 5418 } while (0) 5419 5420 #define PIPE_CONF_CHECK_PLL_CX0(name) do { \ 5421 if (!intel_cx0pll_compare_hw_state(&current_config->name, \ 5422 &pipe_config->name)) { \ 5423 pipe_config_cx0pll_mismatch(&p, fastset, crtc, __stringify(name), \ 5424 &current_config->name, \ 5425 &pipe_config->name); \ 5426 ret = false; \ 5427 } \ 5428 } while (0) 5429 5430 #define PIPE_CONF_CHECK_TIMINGS(name) do { \ 5431 PIPE_CONF_CHECK_I(name.crtc_hdisplay); \ 5432 PIPE_CONF_CHECK_I(name.crtc_htotal); \ 5433 PIPE_CONF_CHECK_I(name.crtc_hblank_start); \ 5434 PIPE_CONF_CHECK_I(name.crtc_hblank_end); \ 5435 PIPE_CONF_CHECK_I(name.crtc_hsync_start); \ 5436 PIPE_CONF_CHECK_I(name.crtc_hsync_end); \ 5437 PIPE_CONF_CHECK_I(name.crtc_vdisplay); \ 5438 PIPE_CONF_CHECK_I(name.crtc_vblank_start); \ 5439 PIPE_CONF_CHECK_I(name.crtc_vsync_start); \ 5440 PIPE_CONF_CHECK_I(name.crtc_vsync_end); \ 5441 if (!fastset || !pipe_config->update_lrr) { \ 5442 PIPE_CONF_CHECK_I(name.crtc_vtotal); \ 5443 PIPE_CONF_CHECK_I(name.crtc_vblank_end); \ 5444 } \ 5445 } while (0) 5446 5447 #define PIPE_CONF_CHECK_RECT(name) do { \ 5448 PIPE_CONF_CHECK_I(name.x1); \ 5449 PIPE_CONF_CHECK_I(name.x2); \ 5450 PIPE_CONF_CHECK_I(name.y1); \ 5451 PIPE_CONF_CHECK_I(name.y2); \ 5452 } while (0) 5453 5454 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ 5455 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 5456 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5457 "(%x) (expected %i, found %i)", \ 5458 (mask), \ 5459 current_config->name & (mask), \ 5460 pipe_config->name & (mask)); \ 5461 ret = false; \ 5462 } \ 5463 } while (0) 5464 5465 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ 5466 if (!intel_compare_infoframe(&current_config->infoframes.name, \ 5467 &pipe_config->infoframes.name)) { \ 5468 pipe_config_infoframe_mismatch(&p, fastset, crtc, __stringify(name), \ 5469 &current_config->infoframes.name, \ 5470 &pipe_config->infoframes.name); \ 5471 ret = false; \ 5472 } \ 5473 } while (0) 5474 5475 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \ 5476 if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \ 5477 &pipe_config->infoframes.name)) { \ 5478 pipe_config_dp_vsc_sdp_mismatch(&p, fastset, crtc, __stringify(name), \ 5479 &current_config->infoframes.name, \ 5480 &pipe_config->infoframes.name); \ 5481 ret = false; \ 5482 } \ 5483 } while (0) 5484 5485 #define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \ 5486 if (!intel_compare_dp_as_sdp(&current_config->infoframes.name, \ 5487 &pipe_config->infoframes.name)) { \ 5488 pipe_config_dp_as_sdp_mismatch(&p, fastset, crtc, __stringify(name), \ 5489 &current_config->infoframes.name, \ 5490 &pipe_config->infoframes.name); \ 5491 ret = false; \ 5492 } \ 5493 } while (0) 5494 5495 #define PIPE_CONF_CHECK_BUFFER(name, len) do { \ 5496
BUILD_BUG_ON(sizeof(current_config->name) != (len)); \ 5497 BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \ 5498 if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \ 5499 pipe_config_buffer_mismatch(&p, fastset, crtc, __stringify(name), \ 5500 current_config->name, \ 5501 pipe_config->name, \ 5502 (len)); \ 5503 ret = false; \ 5504 } \ 5505 } while (0) 5506 5507 #define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \ 5508 if (current_config->gamma_mode == pipe_config->gamma_mode && \ 5509 !intel_color_lut_equal(current_config, \ 5510 current_config->lut, pipe_config->lut, \ 5511 is_pre_csc_lut)) { \ 5512 pipe_config_mismatch(&p, fastset, crtc, __stringify(lut), \ 5513 "hw_state doesn't match sw_state"); \ 5514 ret = false; \ 5515 } \ 5516 } while (0) 5517 5518 #define PIPE_CONF_CHECK_CSC(name) do { \ 5519 PIPE_CONF_CHECK_X(name.preoff[0]); \ 5520 PIPE_CONF_CHECK_X(name.preoff[1]); \ 5521 PIPE_CONF_CHECK_X(name.preoff[2]); \ 5522 PIPE_CONF_CHECK_X(name.coeff[0]); \ 5523 PIPE_CONF_CHECK_X(name.coeff[1]); \ 5524 PIPE_CONF_CHECK_X(name.coeff[2]); \ 5525 PIPE_CONF_CHECK_X(name.coeff[3]); \ 5526 PIPE_CONF_CHECK_X(name.coeff[4]); \ 5527 PIPE_CONF_CHECK_X(name.coeff[5]); \ 5528 PIPE_CONF_CHECK_X(name.coeff[6]); \ 5529 PIPE_CONF_CHECK_X(name.coeff[7]); \ 5530 PIPE_CONF_CHECK_X(name.coeff[8]); \ 5531 PIPE_CONF_CHECK_X(name.postoff[0]); \ 5532 PIPE_CONF_CHECK_X(name.postoff[1]); \ 5533 PIPE_CONF_CHECK_X(name.postoff[2]); \ 5534 } while (0) 5535 5536 #define PIPE_CONF_QUIRK(quirk) \ 5537 ((current_config->quirks | pipe_config->quirks) & (quirk)) 5538 5539 PIPE_CONF_CHECK_BOOL(hw.enable); 5540 PIPE_CONF_CHECK_BOOL(hw.active); 5541 5542 PIPE_CONF_CHECK_I(cpu_transcoder); 5543 PIPE_CONF_CHECK_I(mst_master_transcoder); 5544 5545 PIPE_CONF_CHECK_BOOL(has_pch_encoder); 5546 PIPE_CONF_CHECK_I(fdi_lanes); 5547 PIPE_CONF_CHECK_M_N(fdi_m_n); 5548 5549 PIPE_CONF_CHECK_I(lane_count); 5550 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 5551 5552 if (HAS_DOUBLE_BUFFERED_M_N(display)) { 5553 if (!fastset || !pipe_config->update_m_n) 5554 PIPE_CONF_CHECK_M_N(dp_m_n); 5555 } else { 5556 PIPE_CONF_CHECK_M_N(dp_m_n); 5557 PIPE_CONF_CHECK_M_N(dp_m2_n2); 5558 } 5559 5560 PIPE_CONF_CHECK_X(output_types); 5561 5562 PIPE_CONF_CHECK_I(framestart_delay); 5563 PIPE_CONF_CHECK_I(msa_timing_delay); 5564 5565 PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode); 5566 PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode); 5567 5568 PIPE_CONF_CHECK_I(pixel_multiplier); 5569 5570 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5571 DRM_MODE_FLAG_INTERLACE); 5572 5573 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 5574 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5575 DRM_MODE_FLAG_PHSYNC); 5576 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5577 DRM_MODE_FLAG_NHSYNC); 5578 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5579 DRM_MODE_FLAG_PVSYNC); 5580 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5581 DRM_MODE_FLAG_NVSYNC); 5582 } 5583 5584 PIPE_CONF_CHECK_I(output_format); 5585 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 5586 if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 5587 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 5588 PIPE_CONF_CHECK_BOOL(limited_color_range); 5589 5590 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 5591 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 5592 PIPE_CONF_CHECK_BOOL(has_infoframe); 5593 PIPE_CONF_CHECK_BOOL(enhanced_framing); 5594 PIPE_CONF_CHECK_BOOL(fec_enable); 5595 5596 if (!fastset) { 5597 PIPE_CONF_CHECK_BOOL(has_audio); 5598 PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES); 5599 
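/* note: has_audio and the ELD are only verified here, on the full (non-fastset) check */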
} 5600 5601 PIPE_CONF_CHECK_X(gmch_pfit.control); 5602 /* pfit ratios are autocomputed by the hw on gen4+ */ 5603 if (DISPLAY_VER(dev_priv) < 4) 5604 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 5605 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 5606 5607 /* 5608 * Changing the EDP transcoder input mux 5609 * (A_ONOFF vs. A_ON) requires a full modeset. 5610 */ 5611 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 5612 5613 if (!fastset) { 5614 PIPE_CONF_CHECK_RECT(pipe_src); 5615 5616 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 5617 PIPE_CONF_CHECK_RECT(pch_pfit.dst); 5618 5619 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 5620 PIPE_CONF_CHECK_I(pixel_rate); 5621 5622 PIPE_CONF_CHECK_X(gamma_mode); 5623 if (IS_CHERRYVIEW(dev_priv)) 5624 PIPE_CONF_CHECK_X(cgm_mode); 5625 else 5626 PIPE_CONF_CHECK_X(csc_mode); 5627 PIPE_CONF_CHECK_BOOL(gamma_enable); 5628 PIPE_CONF_CHECK_BOOL(csc_enable); 5629 PIPE_CONF_CHECK_BOOL(wgc_enable); 5630 5631 PIPE_CONF_CHECK_I(linetime); 5632 PIPE_CONF_CHECK_I(ips_linetime); 5633 5634 PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true); 5635 PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false); 5636 5637 PIPE_CONF_CHECK_CSC(csc); 5638 PIPE_CONF_CHECK_CSC(output_csc); 5639 } 5640 5641 /* 5642 * Panel replay has to be enabled before link training. PSR doesn't have 5643 * this requirement -> check these only if using panel replay 5644 */ 5645 if (current_config->active_planes && 5646 (current_config->has_panel_replay || 5647 pipe_config->has_panel_replay)) { 5648 PIPE_CONF_CHECK_BOOL(has_psr); 5649 PIPE_CONF_CHECK_BOOL(has_sel_update); 5650 PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch); 5651 PIPE_CONF_CHECK_BOOL(enable_psr2_su_region_et); 5652 PIPE_CONF_CHECK_BOOL(has_panel_replay); 5653 } 5654 5655 PIPE_CONF_CHECK_BOOL(double_wide); 5656 5657 if (dev_priv->display.dpll.mgr) 5658 PIPE_CONF_CHECK_P(shared_dpll); 5659 5660 /* FIXME convert everything over to the dpll_mgr */ 5661 if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv)) 5662 PIPE_CONF_CHECK_PLL(dpll_hw_state); 5663 5664 /* FIXME convert MTL+ platforms over to dpll_mgr */ 5665 if (DISPLAY_VER(dev_priv) >= 14) 5666 PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll); 5667 5668 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 5669 PIPE_CONF_CHECK_X(dsi_pll.div); 5670 5671 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) 5672 PIPE_CONF_CHECK_I(pipe_bpp); 5673 5674 if (!fastset || !pipe_config->update_m_n) { 5675 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock); 5676 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock); 5677 } 5678 PIPE_CONF_CHECK_I(port_clock); 5679 5680 PIPE_CONF_CHECK_I(min_voltage_level); 5681 5682 if (current_config->has_psr || pipe_config->has_psr) 5683 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, 5684 ~intel_hdmi_infoframe_enable(DP_SDP_VSC)); 5685 else 5686 PIPE_CONF_CHECK_X(infoframes.enable); 5687 5688 PIPE_CONF_CHECK_X(infoframes.gcp); 5689 PIPE_CONF_CHECK_INFOFRAME(avi); 5690 PIPE_CONF_CHECK_INFOFRAME(spd); 5691 PIPE_CONF_CHECK_INFOFRAME(hdmi); 5692 if (!fastset) 5693 PIPE_CONF_CHECK_INFOFRAME(drm); 5694 PIPE_CONF_CHECK_DP_VSC_SDP(vsc); 5695 PIPE_CONF_CHECK_DP_AS_SDP(as_sdp); 5696 5697 PIPE_CONF_CHECK_X(sync_mode_slaves_mask); 5698 PIPE_CONF_CHECK_I(master_transcoder); 5699 PIPE_CONF_CHECK_X(joiner_pipes); 5700 5701 PIPE_CONF_CHECK_BOOL(dsc.config.block_pred_enable); 5702 PIPE_CONF_CHECK_BOOL(dsc.config.convert_rgb); 5703 PIPE_CONF_CHECK_BOOL(dsc.config.simple_422); 5704 PIPE_CONF_CHECK_BOOL(dsc.config.native_422); 5705 PIPE_CONF_CHECK_BOOL(dsc.config.native_420); 5706 PIPE_CONF_CHECK_BOOL(dsc.config.vbr_enable); 5707
PIPE_CONF_CHECK_I(dsc.config.line_buf_depth); 5708 PIPE_CONF_CHECK_I(dsc.config.bits_per_component); 5709 PIPE_CONF_CHECK_I(dsc.config.pic_width); 5710 PIPE_CONF_CHECK_I(dsc.config.pic_height); 5711 PIPE_CONF_CHECK_I(dsc.config.slice_width); 5712 PIPE_CONF_CHECK_I(dsc.config.slice_height); 5713 PIPE_CONF_CHECK_I(dsc.config.initial_dec_delay); 5714 PIPE_CONF_CHECK_I(dsc.config.initial_xmit_delay); 5715 PIPE_CONF_CHECK_I(dsc.config.scale_decrement_interval); 5716 PIPE_CONF_CHECK_I(dsc.config.scale_increment_interval); 5717 PIPE_CONF_CHECK_I(dsc.config.initial_scale_value); 5718 PIPE_CONF_CHECK_I(dsc.config.first_line_bpg_offset); 5719 PIPE_CONF_CHECK_I(dsc.config.flatness_min_qp); 5720 PIPE_CONF_CHECK_I(dsc.config.flatness_max_qp); 5721 PIPE_CONF_CHECK_I(dsc.config.slice_bpg_offset); 5722 PIPE_CONF_CHECK_I(dsc.config.nfl_bpg_offset); 5723 PIPE_CONF_CHECK_I(dsc.config.initial_offset); 5724 PIPE_CONF_CHECK_I(dsc.config.final_offset); 5725 PIPE_CONF_CHECK_I(dsc.config.rc_model_size); 5726 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit0); 5727 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit1); 5728 PIPE_CONF_CHECK_I(dsc.config.slice_chunk_size); 5729 PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset); 5730 PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset); 5731 5732 PIPE_CONF_CHECK_BOOL(dsc.compression_enable); 5733 PIPE_CONF_CHECK_I(dsc.num_streams); 5734 PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16); 5735 5736 PIPE_CONF_CHECK_BOOL(splitter.enable); 5737 PIPE_CONF_CHECK_I(splitter.link_count); 5738 PIPE_CONF_CHECK_I(splitter.pixel_overlap); 5739 5740 if (!fastset) { 5741 PIPE_CONF_CHECK_BOOL(vrr.enable); 5742 PIPE_CONF_CHECK_I(vrr.vmin); 5743 PIPE_CONF_CHECK_I(vrr.vmax); 5744 PIPE_CONF_CHECK_I(vrr.flipline); 5745 PIPE_CONF_CHECK_I(vrr.pipeline_full); 5746 PIPE_CONF_CHECK_I(vrr.guardband); 5747 PIPE_CONF_CHECK_I(vrr.vsync_start); 5748 PIPE_CONF_CHECK_I(vrr.vsync_end); 5749 PIPE_CONF_CHECK_LLI(cmrr.cmrr_m); 5750 PIPE_CONF_CHECK_LLI(cmrr.cmrr_n); 5751 PIPE_CONF_CHECK_BOOL(cmrr.enable); 5752 } 5753 5754 #undef PIPE_CONF_CHECK_X 5755 #undef PIPE_CONF_CHECK_I 5756 #undef PIPE_CONF_CHECK_LLI 5757 #undef PIPE_CONF_CHECK_BOOL 5758 #undef PIPE_CONF_CHECK_P 5759 #undef PIPE_CONF_CHECK_FLAGS 5760 #undef PIPE_CONF_CHECK_COLOR_LUT 5761 #undef PIPE_CONF_CHECK_TIMINGS 5762 #undef PIPE_CONF_CHECK_RECT 5763 #undef PIPE_CONF_QUIRK 5764 5765 return ret; 5766 } 5767 5768 static void 5769 intel_verify_planes(struct intel_atomic_state *state) 5770 { 5771 struct intel_plane *plane; 5772 const struct intel_plane_state *plane_state; 5773 int i; 5774 5775 for_each_new_intel_plane_in_state(state, plane, 5776 plane_state, i) 5777 assert_plane(plane, plane_state->planar_slave || 5778 plane_state->uapi.visible); 5779 } 5780 5781 static int intel_modeset_pipe(struct intel_atomic_state *state, 5782 struct intel_crtc_state *crtc_state, 5783 const char *reason) 5784 { 5785 struct drm_i915_private *i915 = to_i915(state->base.dev); 5786 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5787 int ret; 5788 5789 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Full modeset due to %s\n", 5790 crtc->base.base.id, crtc->base.name, reason); 5791 5792 ret = drm_atomic_add_affected_connectors(&state->base, 5793 &crtc->base); 5794 if (ret) 5795 return ret; 5796 5797 ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc); 5798 if (ret) 5799 return ret; 5800 5801 ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc); 5802 if (ret) 5803 return ret; 5804 5805 ret = intel_atomic_add_affected_planes(state, crtc); 5806 if (ret) 5807 
return ret; 5808 5809 crtc_state->uapi.mode_changed = true; 5810 5811 return 0; 5812 } 5813 5814 /** 5815 * intel_modeset_pipes_in_mask_early - force a full modeset on a set of pipes 5816 * @state: intel atomic state 5817 * @reason: the reason for the full modeset 5818 * @mask: mask of pipes to modeset 5819 * 5820 * Add pipes in @mask to @state and force a full modeset on the enabled ones 5821 * due to the description in @reason. 5822 * This function can be called only before new plane states are computed. 5823 * 5824 * Returns 0 in case of success, negative error code otherwise. 5825 */ 5826 int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state, 5827 const char *reason, u8 mask) 5828 { 5829 struct drm_i915_private *i915 = to_i915(state->base.dev); 5830 struct intel_crtc *crtc; 5831 5832 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mask) { 5833 struct intel_crtc_state *crtc_state; 5834 int ret; 5835 5836 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 5837 if (IS_ERR(crtc_state)) 5838 return PTR_ERR(crtc_state); 5839 5840 if (!crtc_state->hw.enable || 5841 intel_crtc_needs_modeset(crtc_state)) 5842 continue; 5843 5844 ret = intel_modeset_pipe(state, crtc_state, reason); 5845 if (ret) 5846 return ret; 5847 } 5848 5849 return 0; 5850 } 5851 5852 static void 5853 intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state) 5854 { 5855 crtc_state->uapi.mode_changed = true; 5856 5857 crtc_state->update_pipe = false; 5858 crtc_state->update_m_n = false; 5859 crtc_state->update_lrr = false; 5860 } 5861 5862 /** 5863 * intel_modeset_all_pipes_late - force a full modeset on all pipes 5864 * @state: intel atomic state 5865 * @reason: the reason for the full modeset 5866 * 5867 * Add all pipes to @state and force a full modeset on the active ones due to 5868 * the description in @reason. 5869 * This function can be called only after new plane states are computed already. 5870 * 5871 * Returns 0 in case of success, negative error code otherwise. 
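*
* In contrast to intel_modeset_pipes_in_mask_early(), this also updates
* the plane related state of the forced modesets (update_planes etc.).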
5872 */ 5873 int intel_modeset_all_pipes_late(struct intel_atomic_state *state, 5874 const char *reason) 5875 { 5876 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5877 struct intel_crtc *crtc; 5878 5879 for_each_intel_crtc(&dev_priv->drm, crtc) { 5880 struct intel_crtc_state *crtc_state; 5881 int ret; 5882 5883 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 5884 if (IS_ERR(crtc_state)) 5885 return PTR_ERR(crtc_state); 5886 5887 if (!crtc_state->hw.active || 5888 intel_crtc_needs_modeset(crtc_state)) 5889 continue; 5890 5891 ret = intel_modeset_pipe(state, crtc_state, reason); 5892 if (ret) 5893 return ret; 5894 5895 intel_crtc_flag_modeset(crtc_state); 5896 5897 crtc_state->update_planes |= crtc_state->active_planes; 5898 crtc_state->async_flip_planes = 0; 5899 crtc_state->do_async_flip = false; 5900 } 5901 5902 return 0; 5903 } 5904 5905 int intel_modeset_commit_pipes(struct drm_i915_private *i915, 5906 u8 pipe_mask, 5907 struct drm_modeset_acquire_ctx *ctx) 5908 { 5909 struct drm_atomic_state *state; 5910 struct intel_crtc *crtc; 5911 int ret; 5912 5913 state = drm_atomic_state_alloc(&i915->drm); 5914 if (!state) 5915 return -ENOMEM; 5916 5917 state->acquire_ctx = ctx; 5918 to_intel_atomic_state(state)->internal = true; 5919 5920 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) { 5921 struct intel_crtc_state *crtc_state = 5922 intel_atomic_get_crtc_state(state, crtc); 5923 5924 if (IS_ERR(crtc_state)) { 5925 ret = PTR_ERR(crtc_state); 5926 goto out; 5927 } 5928 5929 crtc_state->uapi.connectors_changed = true; 5930 } 5931 5932 ret = drm_atomic_commit(state); 5933 out: 5934 drm_atomic_state_put(state); 5935 5936 return ret; 5937 } 5938 5939 /* 5940 * This implements the workaround described in the "notes" section of the mode 5941 * set sequence documentation. When going from no pipes or single pipe to 5942 * multiple pipes, and planes are enabled after the pipe, we need to wait at 5943 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 5944 */ 5945 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) 5946 { 5947 struct intel_crtc_state *crtc_state; 5948 struct intel_crtc *crtc; 5949 struct intel_crtc_state *first_crtc_state = NULL; 5950 struct intel_crtc_state *other_crtc_state = NULL; 5951 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 5952 int i; 5953 5954 /* look at all crtc's that are going to be enabled during the modeset */ 5955 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5956 if (!crtc_state->hw.active || 5957 !intel_crtc_needs_modeset(crtc_state)) 5958 continue; 5959 5960 if (first_crtc_state) { 5961 other_crtc_state = crtc_state; 5962 break; 5963 } else { 5964 first_crtc_state = crtc_state; 5965 first_pipe = crtc->pipe; 5966 } 5967 } 5968 5969 /* No workaround needed? */ 5970 if (!first_crtc_state) 5971 return 0; 5972 5973 /* w/a possibly needed, check how many crtc's are already enabled.
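* Note that this pulls every crtc on the device into the state, not just
* the ones already in it.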
*/ 5974 for_each_intel_crtc(state->base.dev, crtc) { 5975 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 5976 if (IS_ERR(crtc_state)) 5977 return PTR_ERR(crtc_state); 5978 5979 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 5980 5981 if (!crtc_state->hw.active || 5982 intel_crtc_needs_modeset(crtc_state)) 5983 continue; 5984 5985 /* 2 or more enabled crtcs means no need for w/a */ 5986 if (enabled_pipe != INVALID_PIPE) 5987 return 0; 5988 5989 enabled_pipe = crtc->pipe; 5990 } 5991 5992 if (enabled_pipe != INVALID_PIPE) 5993 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 5994 else if (other_crtc_state) 5995 other_crtc_state->hsw_workaround_pipe = first_pipe; 5996 5997 return 0; 5998 } 5999 6000 u8 intel_calc_active_pipes(struct intel_atomic_state *state, 6001 u8 active_pipes) 6002 { 6003 const struct intel_crtc_state *crtc_state; 6004 struct intel_crtc *crtc; 6005 int i; 6006 6007 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6008 if (crtc_state->hw.active) 6009 active_pipes |= BIT(crtc->pipe); 6010 else 6011 active_pipes &= ~BIT(crtc->pipe); 6012 } 6013 6014 return active_pipes; 6015 } 6016 6017 static int intel_modeset_checks(struct intel_atomic_state *state) 6018 { 6019 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6020 6021 state->modeset = true; 6022 6023 if (IS_HASWELL(dev_priv)) 6024 return hsw_mode_set_planes_workaround(state); 6025 6026 return 0; 6027 } 6028 6029 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 6030 struct intel_crtc_state *new_crtc_state) 6031 { 6032 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 6033 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 6034 6035 /* only allow LRR when the timings stay within the VRR range */ 6036 if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range) 6037 new_crtc_state->update_lrr = false; 6038 6039 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 6040 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n", 6041 crtc->base.base.id, crtc->base.name); 6042 else 6043 new_crtc_state->uapi.mode_changed = false; 6044 6045 if (intel_compare_link_m_n(&old_crtc_state->dp_m_n, 6046 &new_crtc_state->dp_m_n)) 6047 new_crtc_state->update_m_n = false; 6048 6049 if ((old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal && 6050 old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end)) 6051 new_crtc_state->update_lrr = false; 6052 6053 if (intel_crtc_needs_modeset(new_crtc_state)) 6054 intel_crtc_flag_modeset(new_crtc_state); 6055 else 6056 new_crtc_state->update_pipe = true; 6057 } 6058 6059 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, 6060 struct intel_crtc *crtc, 6061 u8 plane_ids_mask) 6062 { 6063 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6064 struct intel_plane *plane; 6065 6066 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 6067 struct intel_plane_state *plane_state; 6068 6069 if ((plane_ids_mask & BIT(plane->id)) == 0) 6070 continue; 6071 6072 plane_state = intel_atomic_get_plane_state(state, plane); 6073 if (IS_ERR(plane_state)) 6074 return PTR_ERR(plane_state); 6075 } 6076 6077 return 0; 6078 } 6079 6080 int intel_atomic_add_affected_planes(struct intel_atomic_state *state, 6081 struct intel_crtc *crtc) 6082 { 6083 const struct intel_crtc_state *old_crtc_state = 6084 intel_atomic_get_old_crtc_state(state, 
crtc); 6085 const struct intel_crtc_state *new_crtc_state = 6086 intel_atomic_get_new_crtc_state(state, crtc); 6087 6088 return intel_crtc_add_planes_to_state(state, crtc, 6089 old_crtc_state->enabled_planes | 6090 new_crtc_state->enabled_planes); 6091 } 6092 6093 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) 6094 { 6095 /* See {hsw,vlv,ivb}_plane_ratio() */ 6096 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || 6097 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || 6098 IS_IVYBRIDGE(dev_priv); 6099 } 6100 6101 static int intel_crtc_add_joiner_planes(struct intel_atomic_state *state, 6102 struct intel_crtc *crtc, 6103 struct intel_crtc *other) 6104 { 6105 const struct intel_plane_state __maybe_unused *plane_state; 6106 struct intel_plane *plane; 6107 u8 plane_ids = 0; 6108 int i; 6109 6110 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 6111 if (plane->pipe == crtc->pipe) 6112 plane_ids |= BIT(plane->id); 6113 } 6114 6115 return intel_crtc_add_planes_to_state(state, other, plane_ids); 6116 } 6117 6118 static int intel_joiner_add_affected_planes(struct intel_atomic_state *state) 6119 { 6120 struct drm_i915_private *i915 = to_i915(state->base.dev); 6121 const struct intel_crtc_state *crtc_state; 6122 struct intel_crtc *crtc; 6123 int i; 6124 6125 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6126 struct intel_crtc *other; 6127 6128 for_each_intel_crtc_in_pipe_mask(&i915->drm, other, 6129 crtc_state->joiner_pipes) { 6130 int ret; 6131 6132 if (crtc == other) 6133 continue; 6134 6135 ret = intel_crtc_add_joiner_planes(state, crtc, other); 6136 if (ret) 6137 return ret; 6138 } 6139 } 6140 6141 return 0; 6142 } 6143 6144 static int intel_atomic_check_planes(struct intel_atomic_state *state) 6145 { 6146 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6147 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6148 struct intel_plane_state __maybe_unused *plane_state; 6149 struct intel_plane *plane; 6150 struct intel_crtc *crtc; 6151 int i, ret; 6152 6153 ret = icl_add_linked_planes(state); 6154 if (ret) 6155 return ret; 6156 6157 ret = intel_joiner_add_affected_planes(state); 6158 if (ret) 6159 return ret; 6160 6161 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 6162 ret = intel_plane_atomic_check(state, plane); 6163 if (ret) { 6164 drm_dbg_atomic(&dev_priv->drm, 6165 "[PLANE:%d:%s] atomic driver check failed\n", 6166 plane->base.base.id, plane->base.name); 6167 return ret; 6168 } 6169 } 6170 6171 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6172 new_crtc_state, i) { 6173 u8 old_active_planes, new_active_planes; 6174 6175 ret = icl_check_nv12_planes(state, crtc); 6176 if (ret) 6177 return ret; 6178 6179 /* 6180 * On some platforms the number of active planes affects 6181 * the planes' minimum cdclk calculation. Add such planes 6182 * to the state before we compute the minimum cdclk. 
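* (Currently that is IVB, HSW/BDW and VLV/CHV; see
* active_planes_affects_min_cdclk() below.)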
6183 */ 6184 if (!active_planes_affects_min_cdclk(dev_priv)) 6185 continue; 6186 6187 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 6188 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 6189 6190 if (hweight8(old_active_planes) == hweight8(new_active_planes)) 6191 continue; 6192 6193 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); 6194 if (ret) 6195 return ret; 6196 } 6197 6198 return 0; 6199 } 6200 6201 static int intel_atomic_check_crtcs(struct intel_atomic_state *state) 6202 { 6203 struct intel_crtc_state __maybe_unused *crtc_state; 6204 struct intel_crtc *crtc; 6205 int i; 6206 6207 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6208 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 6209 int ret; 6210 6211 ret = intel_crtc_atomic_check(state, crtc); 6212 if (ret) { 6213 drm_dbg_atomic(&i915->drm, 6214 "[CRTC:%d:%s] atomic driver check failed\n", 6215 crtc->base.base.id, crtc->base.name); 6216 return ret; 6217 } 6218 } 6219 6220 return 0; 6221 } 6222 6223 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, 6224 u8 transcoders) 6225 { 6226 const struct intel_crtc_state *new_crtc_state; 6227 struct intel_crtc *crtc; 6228 int i; 6229 6230 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6231 if (new_crtc_state->hw.enable && 6232 transcoders & BIT(new_crtc_state->cpu_transcoder) && 6233 intel_crtc_needs_modeset(new_crtc_state)) 6234 return true; 6235 } 6236 6237 return false; 6238 } 6239 6240 static bool intel_pipes_need_modeset(struct intel_atomic_state *state, 6241 u8 pipes) 6242 { 6243 const struct intel_crtc_state *new_crtc_state; 6244 struct intel_crtc *crtc; 6245 int i; 6246 6247 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6248 if (new_crtc_state->hw.enable && 6249 pipes & BIT(crtc->pipe) && 6250 intel_crtc_needs_modeset(new_crtc_state)) 6251 return true; 6252 } 6253 6254 return false; 6255 } 6256 6257 static int intel_atomic_check_joiner(struct intel_atomic_state *state, 6258 struct intel_crtc *primary_crtc) 6259 { 6260 struct drm_i915_private *i915 = to_i915(state->base.dev); 6261 struct intel_crtc_state *primary_crtc_state = 6262 intel_atomic_get_new_crtc_state(state, primary_crtc); 6263 struct intel_crtc *secondary_crtc; 6264 6265 if (!primary_crtc_state->joiner_pipes) 6266 return 0; 6267 6268 /* sanity check */ 6269 if (drm_WARN_ON(&i915->drm, 6270 primary_crtc->pipe != joiner_primary_pipe(primary_crtc_state))) 6271 return -EINVAL; 6272 6273 if (primary_crtc_state->joiner_pipes & ~joiner_pipes(i915)) { 6274 drm_dbg_kms(&i915->drm, 6275 "[CRTC:%d:%s] Cannot act as joiner primary " 6276 "(need 0x%x as pipes, only 0x%x possible)\n", 6277 primary_crtc->base.base.id, primary_crtc->base.name, 6278 primary_crtc_state->joiner_pipes, joiner_pipes(i915)); 6279 return -EINVAL; 6280 } 6281 6282 for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc, 6283 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) { 6284 struct intel_crtc_state *secondary_crtc_state; 6285 int ret; 6286 6287 secondary_crtc_state = intel_atomic_get_crtc_state(&state->base, secondary_crtc); 6288 if (IS_ERR(secondary_crtc_state)) 6289 return PTR_ERR(secondary_crtc_state); 6290 6291 /* primary being enabled, secondary was already configured? 
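* If so, reject the state; a crtc can't be used as a normal crtc and as
* a joiner secondary at the same time.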
*/ 6292 if (secondary_crtc_state->uapi.enable) { 6293 drm_dbg_kms(&i915->drm, 6294 "[CRTC:%d:%s] secondary is enabled as normal CRTC, but " 6295 "[CRTC:%d:%s] claiming this CRTC for joiner.\n", 6296 secondary_crtc->base.base.id, secondary_crtc->base.name, 6297 primary_crtc->base.base.id, primary_crtc->base.name); 6298 return -EINVAL; 6299 } 6300 6301 /* 6302 * The state copy logic assumes the primary crtc gets processed 6303 * before the secondary crtc during the main compute_config loop. 6304 * This works because the crtcs are created in pipe order, 6305 * and the hardware requires primary pipe < secondary pipe as well. 6306 * Should that change we need to rethink the logic. 6307 */ 6308 if (WARN_ON(drm_crtc_index(&primary_crtc->base) > 6309 drm_crtc_index(&secondary_crtc->base))) 6310 return -EINVAL; 6311 6312 drm_dbg_kms(&i915->drm, 6313 "[CRTC:%d:%s] Used as secondary for joiner primary [CRTC:%d:%s]\n", 6314 secondary_crtc->base.base.id, secondary_crtc->base.name, 6315 primary_crtc->base.base.id, primary_crtc->base.name); 6316 6317 secondary_crtc_state->joiner_pipes = 6318 primary_crtc_state->joiner_pipes; 6319 6320 ret = copy_joiner_crtc_state_modeset(state, secondary_crtc); 6321 if (ret) 6322 return ret; 6323 } 6324 6325 return 0; 6326 } 6327 6328 static void kill_joiner_secondaries(struct intel_atomic_state *state, 6329 struct intel_crtc *primary_crtc) 6330 { 6331 struct drm_i915_private *i915 = to_i915(state->base.dev); 6332 struct intel_crtc_state *primary_crtc_state = 6333 intel_atomic_get_new_crtc_state(state, primary_crtc); 6334 struct intel_crtc *secondary_crtc; 6335 6336 for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc, 6337 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) { 6338 struct intel_crtc_state *secondary_crtc_state = 6339 intel_atomic_get_new_crtc_state(state, secondary_crtc); 6340 6341 secondary_crtc_state->joiner_pipes = 0; 6342 6343 intel_crtc_copy_uapi_to_hw_state_modeset(state, secondary_crtc); 6344 } 6345 6346 primary_crtc_state->joiner_pipes = 0; 6347 } 6348 6349 /** 6350 * DOC: asynchronous flip implementation 6351 * 6352 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC 6353 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL. 6354 * Correspondingly, support is currently added for the primary plane only. 6355 * 6356 * Async flip can only change the plane surface address, so anything else 6357 * changing is rejected by the intel_async_flip_check_hw() function. 6358 * Once this check is cleared, the flip done interrupt is enabled using 6359 * the intel_crtc_enable_flip_done() function. 6360 * 6361 * As soon as the surface address register is written, the flip done interrupt is 6362 * generated and the requested events are sent to userspace in the interrupt 6363 * handler itself. The timestamp and sequence sent during the flip done event 6364 * correspond to the last vblank and have no relation to the actual time when 6365 * the flip done event was sent.
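*
* A minimal userspace sketch (assuming an open DRM fd plus crtc_id, fb_id
* and user_data that are already set up via libdrm):
*
*	ret = drmModePageFlip(fd, crtc_id, fb_id,
*			      DRM_MODE_PAGE_FLIP_EVENT |
*			      DRM_MODE_PAGE_FLIP_ASYNC, user_data);
*
* Everything but the framebuffer (i.e. the surface address) must be
* identical to the previous flip, otherwise the ioctl is rejected as
* described above.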
6366 */ 6367 static int intel_async_flip_check_uapi(struct intel_atomic_state *state, 6368 struct intel_crtc *crtc) 6369 { 6370 struct drm_i915_private *i915 = to_i915(state->base.dev); 6371 const struct intel_crtc_state *new_crtc_state = 6372 intel_atomic_get_new_crtc_state(state, crtc); 6373 const struct intel_plane_state *old_plane_state; 6374 struct intel_plane_state *new_plane_state; 6375 struct intel_plane *plane; 6376 int i; 6377 6378 if (!new_crtc_state->uapi.async_flip) 6379 return 0; 6380 6381 if (!new_crtc_state->uapi.active) { 6382 drm_dbg_kms(&i915->drm, 6383 "[CRTC:%d:%s] not active\n", 6384 crtc->base.base.id, crtc->base.name); 6385 return -EINVAL; 6386 } 6387 6388 if (intel_crtc_needs_modeset(new_crtc_state)) { 6389 drm_dbg_kms(&i915->drm, 6390 "[CRTC:%d:%s] modeset required\n", 6391 crtc->base.base.id, crtc->base.name); 6392 return -EINVAL; 6393 } 6394 6395 /* 6396 * FIXME: joiner+async flip is busted currently. 6397 * Remove this check once the issues are fixed. 6398 */ 6399 if (new_crtc_state->joiner_pipes) { 6400 drm_dbg_kms(&i915->drm, 6401 "[CRTC:%d:%s] async flip disallowed with joiner\n", 6402 crtc->base.base.id, crtc->base.name); 6403 return -EINVAL; 6404 } 6405 6406 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 6407 new_plane_state, i) { 6408 if (plane->pipe != crtc->pipe) 6409 continue; 6410 6411 /* 6412 * TODO: Async flip is only supported through the page flip IOCTL 6413 * as of now. So support is currently added for the primary plane only. 6414 * Support for other planes on platforms which support 6415 * this (vlv/chv and icl+) should be added when async flip is 6416 * enabled in the atomic IOCTL path. 6417 */ 6418 if (!plane->async_flip) { 6419 drm_dbg_kms(&i915->drm, 6420 "[PLANE:%d:%s] async flip not supported\n", 6421 plane->base.base.id, plane->base.name); 6422 return -EINVAL; 6423 } 6424 6425 if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) { 6426 drm_dbg_kms(&i915->drm, 6427 "[PLANE:%d:%s] no old or new framebuffer\n", 6428 plane->base.base.id, plane->base.name); 6429 return -EINVAL; 6430 } 6431 } 6432 6433 return 0; 6434 } 6435 6436 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc) 6437 { 6438 struct drm_i915_private *i915 = to_i915(state->base.dev); 6439 const struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6440 const struct intel_plane_state *new_plane_state, *old_plane_state; 6441 struct intel_plane *plane; 6442 int i; 6443 6444 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 6445 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6446 6447 if (!new_crtc_state->uapi.async_flip) 6448 return 0; 6449 6450 if (!new_crtc_state->hw.active) { 6451 drm_dbg_kms(&i915->drm, 6452 "[CRTC:%d:%s] not active\n", 6453 crtc->base.base.id, crtc->base.name); 6454 return -EINVAL; 6455 } 6456 6457 if (intel_crtc_needs_modeset(new_crtc_state)) { 6458 drm_dbg_kms(&i915->drm, 6459 "[CRTC:%d:%s] modeset required\n", 6460 crtc->base.base.id, crtc->base.name); 6461 return -EINVAL; 6462 } 6463 6464 if (old_crtc_state->active_planes != new_crtc_state->active_planes) { 6465 drm_dbg_kms(&i915->drm, 6466 "[CRTC:%d:%s] Active planes cannot be changed during async flip\n", 6467 crtc->base.base.id, crtc->base.name); 6468 return -EINVAL; 6469 } 6470 6471 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 6472 new_plane_state, i) { 6473 if (plane->pipe != crtc->pipe) 6474 continue; 6475 6476 /* 6477 * Only async flip capable planes should be in the state 6478 * if we're
really about to ask the hardware to perform 6479 * an async flip. We should never get this far otherwise. 6480 */ 6481 if (drm_WARN_ON(&i915->drm, 6482 new_crtc_state->do_async_flip && !plane->async_flip)) 6483 return -EINVAL; 6484 6485 /* 6486 * Only check async flip capable planes; other planes 6487 * may be involved in the initial commit due to 6488 * the wm0/ddb optimization. 6489 * 6490 * TODO: maybe we should track which planes actually 6491 * were requested to do the async flip... 6492 */ 6493 if (!plane->async_flip) 6494 continue; 6495 6496 /* 6497 * FIXME: This check is kept generic for all platforms. 6498 * Need to verify this for all gen9 platforms to enable 6499 * this selectively if required. 6500 */ 6501 switch (new_plane_state->hw.fb->modifier) { 6502 case DRM_FORMAT_MOD_LINEAR: 6503 /* 6504 * FIXME: Async on a linear buffer is supported on ICL as 6505 * well, but additional alignment and FBC restrictions 6506 * need to be taken care of. These aren't applicable for 6507 * gen12+. 6508 */ 6509 if (DISPLAY_VER(i915) < 12) { 6510 drm_dbg_kms(&i915->drm, 6511 "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n", 6512 plane->base.base.id, plane->base.name, 6513 new_plane_state->hw.fb->modifier, DISPLAY_VER(i915)); 6514 return -EINVAL; 6515 } 6516 break; 6517 6518 case I915_FORMAT_MOD_X_TILED: 6519 case I915_FORMAT_MOD_Y_TILED: 6520 case I915_FORMAT_MOD_Yf_TILED: 6521 case I915_FORMAT_MOD_4_TILED: 6522 case I915_FORMAT_MOD_4_TILED_BMG_CCS: 6523 case I915_FORMAT_MOD_4_TILED_LNL_CCS: 6524 break; 6525 default: 6526 drm_dbg_kms(&i915->drm, 6527 "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n", 6528 plane->base.base.id, plane->base.name, 6529 new_plane_state->hw.fb->modifier); 6530 return -EINVAL; 6531 } 6532 6533 if (new_plane_state->hw.fb->format->num_planes > 1) { 6534 drm_dbg_kms(&i915->drm, 6535 "[PLANE:%d:%s] Planar formats do not support async flips\n", 6536 plane->base.base.id, plane->base.name); 6537 return -EINVAL; 6538 } 6539 6540 /* 6541 * We turn the first async flip request into a sync flip 6542 * so that we can reconfigure the plane (eg. change modifier).
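* Once do_async_flip is set, the checks below reject any change to the
* stride, modifier, pixel format, rotation, size/coordinates, alpha,
* blend mode, color encoding/range or decryption state.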
6543 */ 6544 if (!new_crtc_state->do_async_flip) 6545 continue; 6546 6547 if (old_plane_state->view.color_plane[0].mapping_stride != 6548 new_plane_state->view.color_plane[0].mapping_stride) { 6549 drm_dbg_kms(&i915->drm, 6550 "[PLANE:%d:%s] Stride cannot be changed in async flip\n", 6551 plane->base.base.id, plane->base.name); 6552 return -EINVAL; 6553 } 6554 6555 if (old_plane_state->hw.fb->modifier != 6556 new_plane_state->hw.fb->modifier) { 6557 drm_dbg_kms(&i915->drm, 6558 "[PLANE:%d:%s] Modifier cannot be changed in async flip\n", 6559 plane->base.base.id, plane->base.name); 6560 return -EINVAL; 6561 } 6562 6563 if (old_plane_state->hw.fb->format != 6564 new_plane_state->hw.fb->format) { 6565 drm_dbg_kms(&i915->drm, 6566 "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n", 6567 plane->base.base.id, plane->base.name); 6568 return -EINVAL; 6569 } 6570 6571 if (old_plane_state->hw.rotation != 6572 new_plane_state->hw.rotation) { 6573 drm_dbg_kms(&i915->drm, 6574 "[PLANE:%d:%s] Rotation cannot be changed in async flip\n", 6575 plane->base.base.id, plane->base.name); 6576 return -EINVAL; 6577 } 6578 6579 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) || 6580 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) { 6581 drm_dbg_kms(&i915->drm, 6582 "[PLANE:%d:%s] Size/coordinates cannot be changed in async flip\n", 6583 plane->base.base.id, plane->base.name); 6584 return -EINVAL; 6585 } 6586 6587 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) { 6588 drm_dbg_kms(&i915->drm, 6589 "[PLANE:%d:%s] Alpha value cannot be changed in async flip\n", 6590 plane->base.base.id, plane->base.name); 6591 return -EINVAL; 6592 } 6593 6594 if (old_plane_state->hw.pixel_blend_mode != 6595 new_plane_state->hw.pixel_blend_mode) { 6596 drm_dbg_kms(&i915->drm, 6597 "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n", 6598 plane->base.base.id, plane->base.name); 6599 return -EINVAL; 6600 } 6601 6602 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) { 6603 drm_dbg_kms(&i915->drm, 6604 "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n", 6605 plane->base.base.id, plane->base.name); 6606 return -EINVAL; 6607 } 6608 6609 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) { 6610 drm_dbg_kms(&i915->drm, 6611 "[PLANE:%d:%s] Color range cannot be changed in async flip\n", 6612 plane->base.base.id, plane->base.name); 6613 return -EINVAL; 6614 } 6615 6616 /* plane decryption is allowed to change only in synchronous flips */ 6617 if (old_plane_state->decrypt != new_plane_state->decrypt) { 6618 drm_dbg_kms(&i915->drm, 6619 "[PLANE:%d:%s] Decryption cannot be changed in async flip\n", 6620 plane->base.base.id, plane->base.name); 6621 return -EINVAL; 6622 } 6623 } 6624 6625 return 0; 6626 } 6627 6628 static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state) 6629 { 6630 struct drm_i915_private *i915 = to_i915(state->base.dev); 6631 struct intel_crtc_state *crtc_state; 6632 struct intel_crtc *crtc; 6633 u8 affected_pipes = 0; 6634 u8 modeset_pipes = 0; 6635 int i; 6636 6637 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6638 affected_pipes |= crtc_state->joiner_pipes; 6639 if (intel_crtc_needs_modeset(crtc_state)) 6640 modeset_pipes |= crtc_state->joiner_pipes; 6641 } 6642 6643 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) { 6644 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 6645 if (IS_ERR(crtc_state)) 6646
return PTR_ERR(crtc_state); 6647 } 6648 6649 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) { 6650 int ret; 6651 6652 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6653 6654 crtc_state->uapi.mode_changed = true; 6655 6656 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 6657 if (ret) 6658 return ret; 6659 6660 ret = intel_atomic_add_affected_planes(state, crtc); 6661 if (ret) 6662 return ret; 6663 } 6664 6665 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6666 /* Kill old joiner link, we may re-establish afterwards */ 6667 if (intel_crtc_needs_modeset(crtc_state) && 6668 intel_crtc_is_joiner_primary(crtc_state)) 6669 kill_joiner_secondaries(state, crtc); 6670 } 6671 6672 return 0; 6673 } 6674 6675 static int intel_atomic_check_config(struct intel_atomic_state *state, 6676 struct intel_link_bw_limits *limits, 6677 enum pipe *failed_pipe) 6678 { 6679 struct drm_i915_private *i915 = to_i915(state->base.dev); 6680 struct intel_crtc_state *new_crtc_state; 6681 struct intel_crtc *crtc; 6682 int ret; 6683 int i; 6684 6685 *failed_pipe = INVALID_PIPE; 6686 6687 ret = intel_joiner_add_affected_crtcs(state); 6688 if (ret) 6689 return ret; 6690 6691 ret = intel_fdi_add_affected_crtcs(state); 6692 if (ret) 6693 return ret; 6694 6695 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6696 if (!intel_crtc_needs_modeset(new_crtc_state)) { 6697 if (intel_crtc_is_joiner_secondary(new_crtc_state)) 6698 copy_joiner_crtc_state_nomodeset(state, crtc); 6699 else 6700 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 6701 continue; 6702 } 6703 6704 if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state))) 6705 continue; 6706 6707 ret = intel_crtc_prepare_cleared_state(state, crtc); 6708 if (ret) 6709 goto fail; 6710 6711 if (!new_crtc_state->hw.enable) 6712 continue; 6713 6714 ret = intel_modeset_pipe_config(state, crtc, limits); 6715 if (ret) 6716 goto fail; 6717 } 6718 6719 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6720 if (!intel_crtc_needs_modeset(new_crtc_state)) 6721 continue; 6722 6723 if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state))) 6724 continue; 6725 6726 if (!new_crtc_state->hw.enable) 6727 continue; 6728 6729 ret = intel_modeset_pipe_config_late(state, crtc); 6730 if (ret) 6731 goto fail; 6732 } 6733 6734 fail: 6735 if (ret) 6736 *failed_pipe = crtc->pipe; 6737 6738 return ret; 6739 } 6740 6741 static int intel_atomic_check_config_and_link(struct intel_atomic_state *state) 6742 { 6743 struct intel_link_bw_limits new_limits; 6744 struct intel_link_bw_limits old_limits; 6745 int ret; 6746 6747 intel_link_bw_init_limits(state, &new_limits); 6748 old_limits = new_limits; 6749 6750 while (true) { 6751 enum pipe failed_pipe; 6752 6753 ret = intel_atomic_check_config(state, &new_limits, 6754 &failed_pipe); 6755 if (ret) { 6756 /* 6757 * The bpp limit for a pipe is below the minimum it supports, set the 6758 * limit to the minimum and recalculate the config. 
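* intel_link_bw_set_bpp_limit_for_pipe() returns false once the limit
* cannot be lowered any further, so this retry loop is bounded.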
6759 */ 6760 if (ret == -EINVAL && 6761 intel_link_bw_set_bpp_limit_for_pipe(state, 6762 &old_limits, 6763 &new_limits, 6764 failed_pipe)) 6765 continue; 6766 6767 break; 6768 } 6769 6770 old_limits = new_limits; 6771 6772 ret = intel_link_bw_atomic_check(state, &new_limits); 6773 if (ret != -EAGAIN) 6774 break; 6775 } 6776 6777 return ret; 6778 } 6779 /** 6780 * intel_atomic_check - validate state object 6781 * @dev: drm device 6782 * @_state: state to validate 6783 */ 6784 int intel_atomic_check(struct drm_device *dev, 6785 struct drm_atomic_state *_state) 6786 { 6787 struct intel_display *display = to_intel_display(dev); 6788 struct drm_i915_private *dev_priv = to_i915(dev); 6789 struct intel_atomic_state *state = to_intel_atomic_state(_state); 6790 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6791 struct intel_crtc *crtc; 6792 int ret, i; 6793 bool any_ms = false; 6794 6795 if (!intel_display_driver_check_access(display)) 6796 return -ENODEV; 6797 6798 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6799 new_crtc_state, i) { 6800 /* 6801 * crtc's state is no longer considered to be inherited 6802 * after the first userspace/client initiated commit. 6803 */ 6804 if (!state->internal) 6805 new_crtc_state->inherited = false; 6806 6807 if (new_crtc_state->inherited != old_crtc_state->inherited) 6808 new_crtc_state->uapi.mode_changed = true; 6809 6810 if (new_crtc_state->uapi.scaling_filter != 6811 old_crtc_state->uapi.scaling_filter) 6812 new_crtc_state->uapi.mode_changed = true; 6813 } 6814 6815 intel_vrr_check_modeset(state); 6816 6817 ret = drm_atomic_helper_check_modeset(dev, &state->base); 6818 if (ret) 6819 goto fail; 6820 6821 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6822 ret = intel_async_flip_check_uapi(state, crtc); 6823 if (ret) 6824 return ret; 6825 } 6826 6827 ret = intel_atomic_check_config_and_link(state); 6828 if (ret) 6829 goto fail; 6830 6831 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6832 if (!intel_crtc_needs_modeset(new_crtc_state)) 6833 continue; 6834 6835 if (intel_crtc_is_joiner_secondary(new_crtc_state)) { 6836 drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable); 6837 continue; 6838 } 6839 6840 ret = intel_atomic_check_joiner(state, crtc); 6841 if (ret) 6842 goto fail; 6843 } 6844 6845 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6846 new_crtc_state, i) { 6847 if (!intel_crtc_needs_modeset(new_crtc_state)) 6848 continue; 6849 6850 intel_joiner_adjust_pipe_src(new_crtc_state); 6851 6852 intel_crtc_check_fastset(old_crtc_state, new_crtc_state); 6853 } 6854 6855 /* 6856 * Check if fastset is allowed by external dependencies like other 6857 * pipes and transcoders. 6858 * 6859 * Right now it only forces a full modeset when the MST master 6860 * transcoder did not change but the pipe of the master transcoder 6861 * needs a full modeset, so all slaves also need to do a full modeset; 6862 * or, in case of port synced crtcs, if one of the synced crtcs 6863 * needs a full modeset, all other synced crtcs should be 6864 * forced to do a full modeset.
6865 */ 6866 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6867 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state)) 6868 continue; 6869 6870 if (intel_dp_mst_crtc_needs_modeset(state, crtc)) 6871 intel_crtc_flag_modeset(new_crtc_state); 6872 6873 if (intel_dp_mst_is_slave_trans(new_crtc_state)) { 6874 enum transcoder master = new_crtc_state->mst_master_transcoder; 6875 6876 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) 6877 intel_crtc_flag_modeset(new_crtc_state); 6878 } 6879 6880 if (is_trans_port_sync_mode(new_crtc_state)) { 6881 u8 trans = new_crtc_state->sync_mode_slaves_mask; 6882 6883 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER) 6884 trans |= BIT(new_crtc_state->master_transcoder); 6885 6886 if (intel_cpu_transcoders_need_modeset(state, trans)) 6887 intel_crtc_flag_modeset(new_crtc_state); 6888 } 6889 6890 if (new_crtc_state->joiner_pipes) { 6891 if (intel_pipes_need_modeset(state, new_crtc_state->joiner_pipes)) 6892 intel_crtc_flag_modeset(new_crtc_state); 6893 } 6894 } 6895 6896 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6897 new_crtc_state, i) { 6898 if (!intel_crtc_needs_modeset(new_crtc_state)) 6899 continue; 6900 6901 any_ms = true; 6902 6903 intel_release_shared_dplls(state, crtc); 6904 } 6905 6906 if (any_ms && !check_digital_port_conflicts(state)) { 6907 drm_dbg_kms(&dev_priv->drm, 6908 "rejecting conflicting digital port configuration\n"); 6909 ret = -EINVAL; 6910 goto fail; 6911 } 6912 6913 ret = intel_atomic_check_planes(state); 6914 if (ret) 6915 goto fail; 6916 6917 ret = intel_compute_global_watermarks(state); 6918 if (ret) 6919 goto fail; 6920 6921 ret = intel_bw_atomic_check(state); 6922 if (ret) 6923 goto fail; 6924 6925 ret = intel_cdclk_atomic_check(state, &any_ms); 6926 if (ret) 6927 goto fail; 6928 6929 if (intel_any_crtc_needs_modeset(state)) 6930 any_ms = true; 6931 6932 if (any_ms) { 6933 ret = intel_modeset_checks(state); 6934 if (ret) 6935 goto fail; 6936 6937 ret = intel_modeset_calc_cdclk(state); 6938 if (ret) 6939 return ret; 6940 } 6941 6942 ret = intel_pmdemand_atomic_check(state); 6943 if (ret) 6944 goto fail; 6945 6946 ret = intel_atomic_check_crtcs(state); 6947 if (ret) 6948 goto fail; 6949 6950 ret = intel_fbc_atomic_check(state); 6951 if (ret) 6952 goto fail; 6953 6954 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6955 new_crtc_state, i) { 6956 intel_color_assert_luts(new_crtc_state); 6957 6958 ret = intel_async_flip_check_hw(state, crtc); 6959 if (ret) 6960 goto fail; 6961 6962 /* Either full modeset or fastset (or neither), never both */ 6963 drm_WARN_ON(&dev_priv->drm, 6964 intel_crtc_needs_modeset(new_crtc_state) && 6965 intel_crtc_needs_fastset(new_crtc_state)); 6966 6967 if (!intel_crtc_needs_modeset(new_crtc_state) && 6968 !intel_crtc_needs_fastset(new_crtc_state)) 6969 continue; 6970 6971 intel_crtc_state_dump(new_crtc_state, state, 6972 intel_crtc_needs_modeset(new_crtc_state) ? 6973 "modeset" : "fastset"); 6974 } 6975 6976 return 0; 6977 6978 fail: 6979 if (ret == -EDEADLK) 6980 return ret; 6981 6982 /* 6983 * FIXME would probably be nice to know which crtc specifically 6984 * caused the failure, in cases where we can pinpoint it. 
6985 */ 6986 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6987 new_crtc_state, i) 6988 intel_crtc_state_dump(new_crtc_state, state, "failed"); 6989 6990 return ret; 6991 } 6992 6993 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 6994 { 6995 int ret; 6996 6997 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base); 6998 if (ret < 0) 6999 return ret; 7000 7001 return 0; 7002 } 7003 7004 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 7005 struct intel_crtc_state *crtc_state) 7006 { 7007 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7008 7009 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes) 7010 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 7011 7012 if (crtc_state->has_pch_encoder) { 7013 enum pipe pch_transcoder = 7014 intel_crtc_pch_transcoder(crtc); 7015 7016 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 7017 } 7018 } 7019 7020 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, 7021 const struct intel_crtc_state *new_crtc_state) 7022 { 7023 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 7024 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7025 7026 /* 7027 * Update pipe size and adjust fitter if needed: the reason for this is 7028 * that in compute_mode_changes we check the native mode (not the pfit 7029 * mode) to see if we can flip rather than do a full mode set. In the 7030 * fastboot case, we'll flip, but if we don't update the pipesrc and 7031 * pfit state, we'll end up with a big fb scanned out into the wrong 7032 * sized surface. 7033 */ 7034 intel_set_pipe_src_size(new_crtc_state); 7035 7036 /* on skylake this is done by detaching scalers */ 7037 if (DISPLAY_VER(dev_priv) >= 9) { 7038 if (new_crtc_state->pch_pfit.enabled) 7039 skl_pfit_enable(new_crtc_state); 7040 } else if (HAS_PCH_SPLIT(dev_priv)) { 7041 if (new_crtc_state->pch_pfit.enabled) 7042 ilk_pfit_enable(new_crtc_state); 7043 else if (old_crtc_state->pch_pfit.enabled) 7044 ilk_pfit_disable(old_crtc_state); 7045 } 7046 7047 /* 7048 * The register is supposedly single buffered so perhaps 7049 * not 100% correct to do this here. But SKL+ calculate 7050 * this based on the adjusted pixel rate so pfit changes do 7051 * affect it and so it must be updated for fastsets. 7052 * HSW/BDW only really need this here for fastboot, after 7053 * that the value should not change without a full modeset. 7054 */ 7055 if (DISPLAY_VER(dev_priv) >= 9 || 7056 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 7057 hsw_set_linetime_wm(new_crtc_state); 7058 7059 if (new_crtc_state->update_m_n) 7060 intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder, 7061 &new_crtc_state->dp_m_n); 7062 7063 if (new_crtc_state->update_lrr) 7064 intel_set_transcoder_timings_lrr(new_crtc_state); 7065 } 7066 7067 static void commit_pipe_pre_planes(struct intel_atomic_state *state, 7068 struct intel_crtc *crtc) 7069 { 7070 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7071 const struct intel_crtc_state *old_crtc_state = 7072 intel_atomic_get_old_crtc_state(state, crtc); 7073 const struct intel_crtc_state *new_crtc_state = 7074 intel_atomic_get_new_crtc_state(state, crtc); 7075 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 7076 7077 /* 7078 * During modesets the pipe configuration was programmed as the 7079 * CRTC was enabled.
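* Hence this only needs to (re)program things for non-modeset commits,
* and only when not using the DSB (the DSB path handles this separately).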
7080 */ 7081 if (!modeset && !new_crtc_state->use_dsb) { 7082 if (intel_crtc_needs_color_update(new_crtc_state)) 7083 intel_color_commit_arm(NULL, new_crtc_state); 7084 7085 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 7086 bdw_set_pipe_misc(NULL, new_crtc_state); 7087 7088 if (intel_crtc_needs_fastset(new_crtc_state)) 7089 intel_pipe_fastset(old_crtc_state, new_crtc_state); 7090 } 7091 7092 intel_psr2_program_trans_man_trk_ctl(new_crtc_state); 7093 7094 intel_atomic_update_watermarks(state, crtc); 7095 } 7096 7097 static void commit_pipe_post_planes(struct intel_atomic_state *state, 7098 struct intel_crtc *crtc) 7099 { 7100 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7101 const struct intel_crtc_state *new_crtc_state = 7102 intel_atomic_get_new_crtc_state(state, crtc); 7103 7104 /* 7105 * Disable the scaler(s) after the plane(s) so that we don't 7106 * get a catastrophic underrun even if the two operations 7107 * end up happening in two different frames. 7108 */ 7109 if (DISPLAY_VER(dev_priv) >= 9 && 7110 !intel_crtc_needs_modeset(new_crtc_state)) 7111 skl_detach_scalers(new_crtc_state); 7112 7113 if (intel_crtc_vrr_enabling(state, crtc)) 7114 intel_vrr_enable(new_crtc_state); 7115 } 7116 7117 static void intel_enable_crtc(struct intel_atomic_state *state, 7118 struct intel_crtc *crtc) 7119 { 7120 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7121 const struct intel_crtc_state *new_crtc_state = 7122 intel_atomic_get_new_crtc_state(state, crtc); 7123 struct intel_crtc *pipe_crtc; 7124 7125 if (!intel_crtc_needs_modeset(new_crtc_state)) 7126 return; 7127 7128 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, 7129 intel_crtc_joined_pipe_mask(new_crtc_state)) { 7130 const struct intel_crtc_state *pipe_crtc_state = 7131 intel_atomic_get_new_crtc_state(state, pipe_crtc); 7132 7133 /* VRR will be enable later, if required */ 7134 intel_crtc_update_active_timings(pipe_crtc_state, false); 7135 } 7136 7137 dev_priv->display.funcs.display->crtc_enable(state, crtc); 7138 7139 /* vblanks work again, re-enable pipe CRC. 
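	 * This pairs with intel_crtc_disable_pipe_crc() in
	 * intel_old_crtc_state_disables(), which stops CRC collection
	 * before the pipe (and its vblanks) goes down.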
*/ 7140 intel_crtc_enable_pipe_crc(crtc); 7141 } 7142 7143 static void intel_pre_update_crtc(struct intel_atomic_state *state, 7144 struct intel_crtc *crtc) 7145 { 7146 struct drm_i915_private *i915 = to_i915(state->base.dev); 7147 const struct intel_crtc_state *old_crtc_state = 7148 intel_atomic_get_old_crtc_state(state, crtc); 7149 struct intel_crtc_state *new_crtc_state = 7150 intel_atomic_get_new_crtc_state(state, crtc); 7151 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 7152 7153 if (old_crtc_state->inherited || 7154 intel_crtc_needs_modeset(new_crtc_state)) { 7155 if (HAS_DPT(i915)) 7156 intel_dpt_configure(crtc); 7157 } 7158 7159 if (!modeset) { 7160 if (new_crtc_state->preload_luts && 7161 intel_crtc_needs_color_update(new_crtc_state)) 7162 intel_color_load_luts(new_crtc_state); 7163 7164 intel_pre_plane_update(state, crtc); 7165 7166 if (intel_crtc_needs_fastset(new_crtc_state)) 7167 intel_encoders_update_pipe(state, crtc); 7168 7169 if (DISPLAY_VER(i915) >= 11 && 7170 intel_crtc_needs_fastset(new_crtc_state)) 7171 icl_set_pipe_chicken(new_crtc_state); 7172 7173 if (vrr_params_changed(old_crtc_state, new_crtc_state) || 7174 cmrr_params_changed(old_crtc_state, new_crtc_state)) 7175 intel_vrr_set_transcoder_timings(new_crtc_state); 7176 } 7177 7178 intel_fbc_update(state, crtc); 7179 7180 drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF)); 7181 7182 if (!modeset && 7183 intel_crtc_needs_color_update(new_crtc_state) && 7184 !new_crtc_state->use_dsb) 7185 intel_color_commit_noarm(NULL, new_crtc_state); 7186 7187 if (!new_crtc_state->use_dsb) 7188 intel_crtc_planes_update_noarm(NULL, state, crtc); 7189 } 7190 7191 static void intel_update_crtc(struct intel_atomic_state *state, 7192 struct intel_crtc *crtc) 7193 { 7194 const struct intel_crtc_state *old_crtc_state = 7195 intel_atomic_get_old_crtc_state(state, crtc); 7196 struct intel_crtc_state *new_crtc_state = 7197 intel_atomic_get_new_crtc_state(state, crtc); 7198 7199 if (new_crtc_state->use_dsb) { 7200 intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->dsb_event); 7201 7202 intel_dsb_commit(new_crtc_state->dsb_commit, false); 7203 } else { 7204 /* Perform vblank evasion around commit operation */ 7205 intel_pipe_update_start(state, crtc); 7206 7207 if (new_crtc_state->dsb_commit) 7208 intel_dsb_commit(new_crtc_state->dsb_commit, false); 7209 7210 commit_pipe_pre_planes(state, crtc); 7211 7212 intel_crtc_planes_update_arm(NULL, state, crtc); 7213 7214 commit_pipe_post_planes(state, crtc); 7215 7216 intel_pipe_update_end(state, crtc); 7217 } 7218 7219 /* 7220 * VRR/Seamless M/N update may need to update frame timings. 7221 * 7222 * FIXME Should be synchronized with the start of vblank somehow... 7223 */ 7224 if (intel_crtc_vrr_enabling(state, crtc) || 7225 new_crtc_state->update_m_n || new_crtc_state->update_lrr) 7226 intel_crtc_update_active_timings(new_crtc_state, 7227 new_crtc_state->vrr.enable); 7228 7229 /* 7230 * We usually enable FIFO underrun interrupts as part of the 7231 * CRTC enable sequence during modesets. But when we inherit a 7232 * valid pipe configuration from the BIOS we need to take care 7233 * of enabling them on the CRTC's first fastset. 
	 */
	if (intel_crtc_needs_fastset(new_crtc_state) &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}

static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc *pipe_crtc;

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
					 intel_crtc_joined_pipe_mask(old_crtc_state))
		intel_crtc_disable_pipe_crc(pipe_crtc);

	dev_priv->display.funcs.display->crtc_disable(state, crtc);

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
					 intel_crtc_joined_pipe_mask(old_crtc_state)) {
		const struct intel_crtc_state *new_pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		pipe_crtc->active = false;
		intel_fbc_disable(pipe_crtc);

		if (!new_pipe_crtc_state->hw.active)
			intel_initial_watermarks(state, pipe_crtc);
	}
}

static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u8 disable_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		/*
		 * Needs to be done even for pipes
		 * that weren't enabled previously.
		 */
		intel_pre_plane_update(state, crtc);

		if (!old_crtc_state->hw.active)
			continue;

		disable_pipes |= BIT(crtc->pipe);
	}

	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
		if ((disable_pipes & BIT(crtc->pipe)) == 0)
			continue;

		intel_crtc_disable_planes(state, crtc);

		drm_vblank_work_flush_all(&crtc->base);
	}

	/* Only disable port sync and MST slaves */
	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
		if ((disable_pipes & BIT(crtc->pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(old_crtc_state))
			continue;

		/*
		 * In the case of transcoder port sync, the master/slave CRTCs
		 * can be assigned in any order, and we need to make sure that
		 * the slave CRTCs are disabled first and the master CRTC last,
		 * since slave vblanks are masked until the master's vblank.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_old_crtc_state_disables(state, crtc);

		disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
	}

	/* Disable everything else left on */
	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
		if ((disable_pipes & BIT(crtc->pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(old_crtc_state))
			continue;

		intel_old_crtc_state_disables(state, crtc);

		disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
	}

	drm_WARN_ON(&i915->drm, disable_pipes);
}

static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		intel_enable_crtc(state, crtc);
		intel_pre_update_crtc(state, crtc);
	}

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		intel_update_crtc(state, crtc);
	}
}

static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for CRTCs that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first let's enable all pipes that do not need a full modeset as
	 * those don't have any external dependency.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		intel_pre_update_crtc(state, crtc);
	}

	intel_dbuf_mbus_pre_ddb_update(state);

	while (update_pipes) {
		/*
		 * Commit in reverse order to make the joiner primary
		 * send the uapi events after the secondaries are done.
		 */
		for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
							    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_crtc_wait_for_next_vblank(crtc);
		}
	}

	intel_dbuf_mbus_post_ddb_update(state);

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that need a modeset and do not depend on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(new_crtc_state))
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state))
			continue;

		modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(new_crtc_state))
			continue;

		modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		intel_pre_update_crtc(state, crtc);
	}

	/*
	 * Commit in reverse order to make the joiner primary
	 * send the uapi events after the secondaries are done.
7502 */ 7503 for_each_new_intel_crtc_in_state_reverse(state, crtc, new_crtc_state, i) { 7504 enum pipe pipe = crtc->pipe; 7505 7506 if ((update_pipes & BIT(pipe)) == 0) 7507 continue; 7508 7509 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 7510 entries, I915_MAX_PIPES, pipe)); 7511 7512 entries[pipe] = new_crtc_state->wm.skl.ddb; 7513 update_pipes &= ~BIT(pipe); 7514 7515 intel_update_crtc(state, crtc); 7516 } 7517 7518 drm_WARN_ON(&dev_priv->drm, modeset_pipes); 7519 drm_WARN_ON(&dev_priv->drm, update_pipes); 7520 } 7521 7522 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 7523 { 7524 struct drm_i915_private *i915 = to_i915(intel_state->base.dev); 7525 struct drm_plane *plane; 7526 struct drm_plane_state *new_plane_state; 7527 int ret, i; 7528 7529 for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) { 7530 if (new_plane_state->fence) { 7531 ret = dma_fence_wait_timeout(new_plane_state->fence, false, 7532 i915_fence_timeout(i915)); 7533 if (ret <= 0) 7534 break; 7535 7536 dma_fence_put(new_plane_state->fence); 7537 new_plane_state->fence = NULL; 7538 } 7539 } 7540 } 7541 7542 static void intel_atomic_dsb_wait_commit(struct intel_crtc_state *crtc_state) 7543 { 7544 if (crtc_state->dsb_commit) 7545 intel_dsb_wait(crtc_state->dsb_commit); 7546 7547 intel_color_wait_commit(crtc_state); 7548 } 7549 7550 static void intel_atomic_dsb_cleanup(struct intel_crtc_state *crtc_state) 7551 { 7552 if (crtc_state->dsb_commit) { 7553 intel_dsb_cleanup(crtc_state->dsb_commit); 7554 crtc_state->dsb_commit = NULL; 7555 } 7556 7557 intel_color_cleanup_commit(crtc_state); 7558 } 7559 7560 static void intel_atomic_cleanup_work(struct work_struct *work) 7561 { 7562 struct intel_atomic_state *state = 7563 container_of(work, struct intel_atomic_state, cleanup_work); 7564 struct drm_i915_private *i915 = to_i915(state->base.dev); 7565 struct intel_crtc_state *old_crtc_state; 7566 struct intel_crtc *crtc; 7567 int i; 7568 7569 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) 7570 intel_atomic_dsb_cleanup(old_crtc_state); 7571 7572 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base); 7573 drm_atomic_helper_commit_cleanup_done(&state->base); 7574 drm_atomic_state_put(&state->base); 7575 } 7576 7577 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state) 7578 { 7579 struct drm_i915_private *i915 = to_i915(state->base.dev); 7580 struct intel_plane *plane; 7581 struct intel_plane_state *plane_state; 7582 int i; 7583 7584 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 7585 struct drm_framebuffer *fb = plane_state->hw.fb; 7586 int cc_plane; 7587 int ret; 7588 7589 if (!fb) 7590 continue; 7591 7592 cc_plane = intel_fb_rc_ccs_cc_plane(fb); 7593 if (cc_plane < 0) 7594 continue; 7595 7596 /* 7597 * The layout of the fast clear color value expected by HW 7598 * (the DRM ABI requiring this value to be located in fb at 7599 * offset 0 of cc plane, plane #2 previous generations or 7600 * plane #1 for flat ccs): 7601 * - 4 x 4 bytes per-channel value 7602 * (in surface type specific float/int format provided by the fb user) 7603 * - 8 bytes native color value used by the display 7604 * (converted/written by GPU during a fast clear operation using the 7605 * above per-channel values) 7606 * 7607 * The commit's FB prepare hook already ensured that FB obj is pinned and the 7608 * caller made sure that the object is synced wrt. 
the related color clear value 7609 * GPU write on it. 7610 */ 7611 ret = intel_bo_read_from_page(intel_fb_bo(fb), 7612 fb->offsets[cc_plane] + 16, 7613 &plane_state->ccval, 7614 sizeof(plane_state->ccval)); 7615 /* The above could only fail if the FB obj has an unexpected backing store type. */ 7616 drm_WARN_ON(&i915->drm, ret); 7617 } 7618 } 7619 7620 static void intel_atomic_dsb_prepare(struct intel_atomic_state *state, 7621 struct intel_crtc *crtc) 7622 { 7623 intel_color_prepare_commit(state, crtc); 7624 } 7625 7626 static void intel_atomic_dsb_finish(struct intel_atomic_state *state, 7627 struct intel_crtc *crtc) 7628 { 7629 const struct intel_crtc_state *old_crtc_state = 7630 intel_atomic_get_old_crtc_state(state, crtc); 7631 struct intel_crtc_state *new_crtc_state = 7632 intel_atomic_get_new_crtc_state(state, crtc); 7633 7634 if (!new_crtc_state->hw.active) 7635 return; 7636 7637 if (state->base.legacy_cursor_update) 7638 return; 7639 7640 /* FIXME deal with everything */ 7641 new_crtc_state->use_dsb = 7642 new_crtc_state->update_planes && 7643 !new_crtc_state->vrr.enable && 7644 !new_crtc_state->do_async_flip && 7645 !new_crtc_state->has_psr && 7646 !new_crtc_state->scaler_state.scaler_users && 7647 !old_crtc_state->scaler_state.scaler_users && 7648 !intel_crtc_needs_modeset(new_crtc_state) && 7649 !intel_crtc_needs_fastset(new_crtc_state); 7650 7651 if (!new_crtc_state->use_dsb && !new_crtc_state->dsb_color_vblank) 7652 return; 7653 7654 /* 7655 * Rough estimate: 7656 * ~64 registers per each plane * 8 planes = 512 7657 * Double that for pipe stuff and other overhead. 7658 */ 7659 new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 7660 new_crtc_state->use_dsb ? 1024 : 16); 7661 if (!new_crtc_state->dsb_commit) { 7662 new_crtc_state->use_dsb = false; 7663 intel_color_cleanup_commit(new_crtc_state); 7664 return; 7665 } 7666 7667 if (new_crtc_state->use_dsb) { 7668 if (intel_crtc_needs_color_update(new_crtc_state)) 7669 intel_color_commit_noarm(new_crtc_state->dsb_commit, 7670 new_crtc_state); 7671 intel_crtc_planes_update_noarm(new_crtc_state->dsb_commit, 7672 state, crtc); 7673 7674 intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit); 7675 7676 if (intel_crtc_needs_color_update(new_crtc_state)) 7677 intel_color_commit_arm(new_crtc_state->dsb_commit, 7678 new_crtc_state); 7679 bdw_set_pipe_misc(new_crtc_state->dsb_commit, 7680 new_crtc_state); 7681 intel_crtc_planes_update_arm(new_crtc_state->dsb_commit, 7682 state, crtc); 7683 7684 if (!new_crtc_state->dsb_color_vblank) { 7685 intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1); 7686 intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit); 7687 intel_dsb_interrupt(new_crtc_state->dsb_commit); 7688 } 7689 } 7690 7691 if (new_crtc_state->dsb_color_vblank) 7692 intel_dsb_chain(state, new_crtc_state->dsb_commit, 7693 new_crtc_state->dsb_color_vblank, true); 7694 7695 intel_dsb_finish(new_crtc_state->dsb_commit); 7696 } 7697 7698 static void intel_atomic_commit_tail(struct intel_atomic_state *state) 7699 { 7700 struct drm_device *dev = state->base.dev; 7701 struct drm_i915_private *dev_priv = to_i915(dev); 7702 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 7703 struct intel_crtc *crtc; 7704 struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {}; 7705 intel_wakeref_t wakeref = NULL; 7706 int i; 7707 7708 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7709 intel_atomic_dsb_prepare(state, crtc); 7710 7711 intel_atomic_commit_fence_wait(state); 7712 7713 
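	/*
	 * Transient Data Flush: flush any stale transient data ahead of
	 * the plane updates below, on platforms that require it.
	 */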
	intel_td_flush(dev_priv);

	intel_atomic_prepare_plane_clear_colors(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_atomic_dsb_finish(state, crtc);

	drm_atomic_helper_wait_for_dependencies(&state->base);
	drm_dp_mst_atomic_wait_for_dependencies(&state->base);
	intel_atomic_global_state_wait_for_dependencies(state);

	/*
	 * During full modesets we write a lot of registers, wait
	 * for PLLs, etc. Doing that while DC states are enabled
	 * is not a good idea.
	 *
	 * During fastsets and other updates we also need to
	 * disable DC states due to the following scenario:
	 * 1. DC5 exit and PSR exit happen
	 * 2. Some or all _noarm() registers are written
	 * 3. Due to some long delay PSR is re-entered
	 * 4. DC5 entry -> DMC saves the already written new
	 *    _noarm() registers and the old not yet written
	 *    _arm() registers
	 * 5. DC5 exit -> DMC restores a mixture of old and
	 *    new register values and arms the update
	 * 6. PSR exit -> hardware latches a mixture of old and
	 *    new register values -> corrupted frame, or worse
	 * 7. New _arm() registers are finally written
	 * 8. Hardware finally latches a complete set of new
	 *    register values, and subsequent frames will be OK again
	 *
	 * Also note that due to the pipe CSC hardware issues on
	 * SKL/GLK DC states must remain off until the pipe CSC
	 * state readout has happened. Otherwise we risk corrupting
	 * the CSC latched register values with the readout (see
	 * skl_read_csc() and skl_color_commit_noarm()).
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    intel_crtc_needs_fastset(new_crtc_state))
			intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
	}

	intel_commit_modeset_disables(state);

	intel_dp_tunnel_atomic_alloc_bw(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	/*
	 * In XE_LPD+ Pmdemand combines many parameters such as voltage index,
	 * plls, cdclk frequency, QGV point selection parameter, etc. Voltage
	 * index, cdclk/ddiclk frequencies are supposed to be configured before
	 * the cdclk config is set.
	 */
	intel_pmdemand_pre_plane_update(state);

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for the now-disabled pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.funcs.display->commit_modeset_enables(state);

	intel_program_dpkgc_latency(state);

	if (state->modeset)
		intel_set_cdclk_post_plane_update(state);

	intel_wait_for_vblank_workers(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_disable_flip_done(state, crtc);

		intel_atomic_dsb_wait_commit(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		intel_optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(state, crtc);

		intel_post_plane_update_after_readout(state, crtc);

		/*
		 * DSB cleanup is done in cleanup_work, aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do the dsb cleanup in cleanup_work.
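		 * (The cleanup runs from intel_atomic_cleanup_work(), queued
		 * on the dedicated display cleanup workqueue further below.)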
7876 * 7877 * FIXME get rid of this funny new->old swapping 7878 */ 7879 old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank); 7880 old_crtc_state->dsb_commit = fetch_and_zero(&new_crtc_state->dsb_commit); 7881 } 7882 7883 /* Underruns don't always raise interrupts, so check manually */ 7884 intel_check_cpu_fifo_underruns(dev_priv); 7885 intel_check_pch_fifo_underruns(dev_priv); 7886 7887 if (state->modeset) 7888 intel_verify_planes(state); 7889 7890 intel_sagv_post_plane_update(state); 7891 intel_pmdemand_post_plane_update(state); 7892 7893 drm_atomic_helper_commit_hw_done(&state->base); 7894 intel_atomic_global_state_commit_done(state); 7895 7896 if (state->modeset) { 7897 /* As one of the primary mmio accessors, KMS has a high 7898 * likelihood of triggering bugs in unclaimed access. After we 7899 * finish modesetting, see if an error has been flagged, and if 7900 * so enable debugging for the next modeset - and hope we catch 7901 * the culprit. 7902 */ 7903 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); 7904 } 7905 /* 7906 * Delay re-enabling DC states by 17 ms to avoid the off->on->off 7907 * toggling overhead at and above 60 FPS. 7908 */ 7909 intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17); 7910 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 7911 7912 /* 7913 * Defer the cleanup of the old state to a separate worker to not 7914 * impede the current task (userspace for blocking modesets) that 7915 * are executed inline. For out-of-line asynchronous modesets/flips, 7916 * deferring to a new worker seems overkill, but we would place a 7917 * schedule point (cond_resched()) here anyway to keep latencies 7918 * down. 7919 */ 7920 INIT_WORK(&state->cleanup_work, intel_atomic_cleanup_work); 7921 queue_work(dev_priv->display.wq.cleanup, &state->cleanup_work); 7922 } 7923 7924 static void intel_atomic_commit_work(struct work_struct *work) 7925 { 7926 struct intel_atomic_state *state = 7927 container_of(work, struct intel_atomic_state, base.commit_work); 7928 7929 intel_atomic_commit_tail(state); 7930 } 7931 7932 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 7933 { 7934 struct intel_plane_state *old_plane_state, *new_plane_state; 7935 struct intel_plane *plane; 7936 int i; 7937 7938 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 7939 new_plane_state, i) 7940 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 7941 to_intel_frontbuffer(new_plane_state->hw.fb), 7942 plane->frontbuffer_bit); 7943 } 7944 7945 static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock) 7946 { 7947 int ret; 7948 7949 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 7950 if (ret) 7951 return ret; 7952 7953 ret = intel_atomic_global_state_setup_commit(state); 7954 if (ret) 7955 return ret; 7956 7957 return 0; 7958 } 7959 7960 static int intel_atomic_swap_state(struct intel_atomic_state *state) 7961 { 7962 int ret; 7963 7964 ret = drm_atomic_helper_swap_state(&state->base, true); 7965 if (ret) 7966 return ret; 7967 7968 intel_atomic_swap_global_state(state); 7969 7970 intel_shared_dpll_swap_state(state); 7971 7972 intel_atomic_track_fbs(state); 7973 7974 return 0; 7975 } 7976 7977 int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, 7978 bool nonblock) 7979 { 7980 struct intel_atomic_state *state = to_intel_atomic_state(_state); 7981 struct drm_i915_private *dev_priv = to_i915(dev); 7982 int ret = 0; 
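
	/*
	 * The whole commit path touches display hardware, so keep the device
	 * awake: the wakeref taken below is dropped in
	 * intel_atomic_commit_tail() or on the error paths further down.
	 */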
7983 7984 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 7985 7986 /* 7987 * The intel_legacy_cursor_update() fast path takes care 7988 * of avoiding the vblank waits for simple cursor 7989 * movement and flips. For cursor on/off and size changes, 7990 * we want to perform the vblank waits so that watermark 7991 * updates happen during the correct frames. Gen9+ have 7992 * double buffered watermarks and so shouldn't need this. 7993 * 7994 * Unset state->legacy_cursor_update before the call to 7995 * drm_atomic_helper_setup_commit() because otherwise 7996 * drm_atomic_helper_wait_for_flip_done() is a noop and 7997 * we get FIFO underruns because we didn't wait 7998 * for vblank. 7999 * 8000 * FIXME doing watermarks and fb cleanup from a vblank worker 8001 * (assuming we had any) would solve these problems. 8002 */ 8003 if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) { 8004 struct intel_crtc_state *new_crtc_state; 8005 struct intel_crtc *crtc; 8006 int i; 8007 8008 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 8009 if (new_crtc_state->wm.need_postvbl_update || 8010 new_crtc_state->update_wm_post) 8011 state->base.legacy_cursor_update = false; 8012 } 8013 8014 ret = intel_atomic_prepare_commit(state); 8015 if (ret) { 8016 drm_dbg_atomic(&dev_priv->drm, 8017 "Preparing state failed with %i\n", ret); 8018 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 8019 return ret; 8020 } 8021 8022 ret = intel_atomic_setup_commit(state, nonblock); 8023 if (!ret) 8024 ret = intel_atomic_swap_state(state); 8025 8026 if (ret) { 8027 drm_atomic_helper_unprepare_planes(dev, &state->base); 8028 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 8029 return ret; 8030 } 8031 8032 drm_atomic_state_get(&state->base); 8033 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 8034 8035 if (nonblock && state->modeset) { 8036 queue_work(dev_priv->display.wq.modeset, &state->base.commit_work); 8037 } else if (nonblock) { 8038 queue_work(dev_priv->display.wq.flip, &state->base.commit_work); 8039 } else { 8040 if (state->modeset) 8041 flush_workqueue(dev_priv->display.wq.modeset); 8042 intel_atomic_commit_tail(state); 8043 } 8044 8045 return 0; 8046 } 8047 8048 /** 8049 * intel_plane_destroy - destroy a plane 8050 * @plane: plane to destroy 8051 * 8052 * Common destruction function for all types of planes (primary, cursor, 8053 * sprite). 
8054 */ 8055 void intel_plane_destroy(struct drm_plane *plane) 8056 { 8057 drm_plane_cleanup(plane); 8058 kfree(to_intel_plane(plane)); 8059 } 8060 8061 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) 8062 { 8063 struct drm_device *dev = encoder->base.dev; 8064 struct intel_encoder *source_encoder; 8065 u32 possible_clones = 0; 8066 8067 for_each_intel_encoder(dev, source_encoder) { 8068 if (encoders_cloneable(encoder, source_encoder)) 8069 possible_clones |= drm_encoder_mask(&source_encoder->base); 8070 } 8071 8072 return possible_clones; 8073 } 8074 8075 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) 8076 { 8077 struct drm_device *dev = encoder->base.dev; 8078 struct intel_crtc *crtc; 8079 u32 possible_crtcs = 0; 8080 8081 for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask) 8082 possible_crtcs |= drm_crtc_mask(&crtc->base); 8083 8084 return possible_crtcs; 8085 } 8086 8087 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) 8088 { 8089 if (!IS_MOBILE(dev_priv)) 8090 return false; 8091 8092 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0) 8093 return false; 8094 8095 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE)) 8096 return false; 8097 8098 return true; 8099 } 8100 8101 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) 8102 { 8103 if (DISPLAY_VER(dev_priv) >= 9) 8104 return false; 8105 8106 if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv)) 8107 return false; 8108 8109 if (HAS_PCH_LPT_H(dev_priv) && 8110 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 8111 return false; 8112 8113 /* DDI E can't be used if DDI A requires 4 lanes */ 8114 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 8115 return false; 8116 8117 if (!dev_priv->display.vbt.int_crt_support) 8118 return false; 8119 8120 return true; 8121 } 8122 8123 bool assert_port_valid(struct drm_i915_private *i915, enum port port) 8124 { 8125 return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)), 8126 "Platform does not support port %c\n", port_name(port)); 8127 } 8128 8129 void intel_setup_outputs(struct drm_i915_private *dev_priv) 8130 { 8131 struct intel_display *display = &dev_priv->display; 8132 struct intel_encoder *encoder; 8133 bool dpd_is_edp = false; 8134 8135 intel_pps_unlock_regs_wa(display); 8136 8137 if (!HAS_DISPLAY(dev_priv)) 8138 return; 8139 8140 if (HAS_DDI(dev_priv)) { 8141 if (intel_ddi_crt_present(dev_priv)) 8142 intel_crt_init(display); 8143 8144 intel_bios_for_each_encoder(display, intel_ddi_init); 8145 8146 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 8147 vlv_dsi_init(dev_priv); 8148 } else if (HAS_PCH_SPLIT(dev_priv)) { 8149 int found; 8150 8151 /* 8152 * intel_edp_init_connector() depends on this completing first, 8153 * to prevent the registration of both eDP and LVDS and the 8154 * incorrect sharing of the PPS. 
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(display);

		dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			g4x_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB is multiplexed with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
			intel_crt_init(display);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both the strap
		 * and the VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares, as we've seen at least
		 * HDMI ports that the VBT claims are DP or eDP.
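		 * So below, a port is initialized when either the strap or the
		 * VBT indicates its presence, while eDP-ness is decided from
		 * the VBT (intel_dp_is_port_edp()) rather than the strap.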
8204 */ 8205 has_edp = intel_dp_is_port_edp(display, PORT_B); 8206 has_port = intel_bios_is_port_present(display, PORT_B); 8207 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port) 8208 has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B); 8209 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 8210 g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 8211 8212 has_edp = intel_dp_is_port_edp(display, PORT_C); 8213 has_port = intel_bios_is_port_present(display, PORT_C); 8214 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port) 8215 has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C); 8216 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 8217 g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 8218 8219 if (IS_CHERRYVIEW(dev_priv)) { 8220 /* 8221 * eDP not supported on port D, 8222 * so no need to worry about it 8223 */ 8224 has_port = intel_bios_is_port_present(display, PORT_D); 8225 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port) 8226 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D); 8227 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port) 8228 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 8229 } 8230 8231 vlv_dsi_init(dev_priv); 8232 } else if (IS_PINEVIEW(dev_priv)) { 8233 intel_lvds_init(dev_priv); 8234 intel_crt_init(display); 8235 } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) { 8236 bool found = false; 8237 8238 if (IS_MOBILE(dev_priv)) 8239 intel_lvds_init(dev_priv); 8240 8241 intel_crt_init(display); 8242 8243 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 8244 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n"); 8245 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 8246 if (!found && IS_G4X(dev_priv)) { 8247 drm_dbg_kms(&dev_priv->drm, 8248 "probing HDMI on SDVOB\n"); 8249 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 8250 } 8251 8252 if (!found && IS_G4X(dev_priv)) 8253 g4x_dp_init(dev_priv, DP_B, PORT_B); 8254 } 8255 8256 /* Before G4X SDVOC doesn't have its own detect register */ 8257 8258 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 8259 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n"); 8260 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 8261 } 8262 8263 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) { 8264 8265 if (IS_G4X(dev_priv)) { 8266 drm_dbg_kms(&dev_priv->drm, 8267 "probing HDMI on SDVOC\n"); 8268 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 8269 } 8270 if (IS_G4X(dev_priv)) 8271 g4x_dp_init(dev_priv, DP_C, PORT_C); 8272 } 8273 8274 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED)) 8275 g4x_dp_init(dev_priv, DP_D, PORT_D); 8276 8277 if (SUPPORTS_TV(dev_priv)) 8278 intel_tv_init(display); 8279 } else if (DISPLAY_VER(dev_priv) == 2) { 8280 if (IS_I85X(dev_priv)) 8281 intel_lvds_init(dev_priv); 8282 8283 intel_crt_init(display); 8284 intel_dvo_init(dev_priv); 8285 } 8286 8287 for_each_intel_encoder(&dev_priv->drm, encoder) { 8288 encoder->base.possible_crtcs = 8289 intel_encoder_possible_crtcs(encoder); 8290 encoder->base.possible_clones = 8291 intel_encoder_possible_clones(encoder); 8292 } 8293 8294 intel_init_pch_refclk(dev_priv); 8295 8296 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 8297 } 8298 8299 static int max_dotclock(struct drm_i915_private *i915) 8300 { 8301 struct intel_display *display = &i915->display; 8302 int max_dotclock = display->cdclk.max_dotclk_freq; 8303 8304 if (HAS_ULTRAJOINER(display)) 8305 max_dotclock *= 4; 8306 else if (HAS_UNCOMPRESSED_JOINER(display) || 
HAS_BIGJOINER(display)) 8307 max_dotclock *= 2; 8308 8309 return max_dotclock; 8310 } 8311 8312 enum drm_mode_status intel_mode_valid(struct drm_device *dev, 8313 const struct drm_display_mode *mode) 8314 { 8315 struct drm_i915_private *dev_priv = to_i915(dev); 8316 int hdisplay_max, htotal_max; 8317 int vdisplay_max, vtotal_max; 8318 8319 /* 8320 * Can't reject DBLSCAN here because Xorg ddxen can add piles 8321 * of DBLSCAN modes to the output's mode list when they detect 8322 * the scaling mode property on the connector. And they don't 8323 * ask the kernel to validate those modes in any way until 8324 * modeset time at which point the client gets a protocol error. 8325 * So in order to not upset those clients we silently ignore the 8326 * DBLSCAN flag on such connectors. For other connectors we will 8327 * reject modes with the DBLSCAN flag in encoder->compute_config(). 8328 * And we always reject DBLSCAN modes in connector->mode_valid() 8329 * as we never want such modes on the connector's mode list. 8330 */ 8331 8332 if (mode->vscan > 1) 8333 return MODE_NO_VSCAN; 8334 8335 if (mode->flags & DRM_MODE_FLAG_HSKEW) 8336 return MODE_H_ILLEGAL; 8337 8338 if (mode->flags & (DRM_MODE_FLAG_CSYNC | 8339 DRM_MODE_FLAG_NCSYNC | 8340 DRM_MODE_FLAG_PCSYNC)) 8341 return MODE_HSYNC; 8342 8343 if (mode->flags & (DRM_MODE_FLAG_BCAST | 8344 DRM_MODE_FLAG_PIXMUX | 8345 DRM_MODE_FLAG_CLKDIV2)) 8346 return MODE_BAD; 8347 8348 /* 8349 * Reject clearly excessive dotclocks early to 8350 * avoid having to worry about huge integers later. 8351 */ 8352 if (mode->clock > max_dotclock(dev_priv)) 8353 return MODE_CLOCK_HIGH; 8354 8355 /* Transcoder timing limits */ 8356 if (DISPLAY_VER(dev_priv) >= 11) { 8357 hdisplay_max = 16384; 8358 vdisplay_max = 8192; 8359 htotal_max = 16384; 8360 vtotal_max = 8192; 8361 } else if (DISPLAY_VER(dev_priv) >= 9 || 8362 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 8363 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ 8364 vdisplay_max = 4096; 8365 htotal_max = 8192; 8366 vtotal_max = 8192; 8367 } else if (DISPLAY_VER(dev_priv) >= 3) { 8368 hdisplay_max = 4096; 8369 vdisplay_max = 4096; 8370 htotal_max = 8192; 8371 vtotal_max = 8192; 8372 } else { 8373 hdisplay_max = 2048; 8374 vdisplay_max = 2048; 8375 htotal_max = 4096; 8376 vtotal_max = 4096; 8377 } 8378 8379 if (mode->hdisplay > hdisplay_max || 8380 mode->hsync_start > htotal_max || 8381 mode->hsync_end > htotal_max || 8382 mode->htotal > htotal_max) 8383 return MODE_H_ILLEGAL; 8384 8385 if (mode->vdisplay > vdisplay_max || 8386 mode->vsync_start > vtotal_max || 8387 mode->vsync_end > vtotal_max || 8388 mode->vtotal > vtotal_max) 8389 return MODE_V_ILLEGAL; 8390 8391 return MODE_OK; 8392 } 8393 8394 enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv, 8395 const struct drm_display_mode *mode) 8396 { 8397 /* 8398 * Additional transcoder timing limits, 8399 * excluding BXT/GLK DSI transcoders. 8400 */ 8401 if (DISPLAY_VER(dev_priv) >= 5) { 8402 if (mode->hdisplay < 64 || 8403 mode->htotal - mode->hdisplay < 32) 8404 return MODE_H_ILLEGAL; 8405 8406 if (mode->vtotal - mode->vdisplay < 5) 8407 return MODE_V_ILLEGAL; 8408 } else { 8409 if (mode->htotal - mode->hdisplay < 32) 8410 return MODE_H_ILLEGAL; 8411 8412 if (mode->vtotal - mode->vdisplay < 3) 8413 return MODE_V_ILLEGAL; 8414 } 8415 8416 /* 8417 * Cantiga+ cannot handle modes with a hsync front porch of 0. 8418 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 
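	 * (hence the mode->hsync_start == mode->hdisplay rejection below)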
	 */
	if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode,
				int num_joined_pipes)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
	 * too big for that.
	 */
	if (DISPLAY_VER(dev_priv) >= 30) {
		plane_width_max = 6144 * num_joined_pipes;
		plane_height_max = 4800;
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		plane_width_max = 5120 * num_joined_pipes;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

static const struct intel_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
	.fixup_initial_plane_config = skl_fixup_initial_plane_config,
};

static const struct intel_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9) {
		dev_priv->display.funcs.display = &skl_display_funcs;
	} else if (HAS_DDI(dev_priv)) {
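		/* HSW/BDW: the only DDI platforms below display version 9 */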
dev_priv->display.funcs.display = &ddi_display_funcs; 8521 } else if (HAS_PCH_SPLIT(dev_priv)) { 8522 dev_priv->display.funcs.display = &pch_split_display_funcs; 8523 } else if (IS_CHERRYVIEW(dev_priv) || 8524 IS_VALLEYVIEW(dev_priv)) { 8525 dev_priv->display.funcs.display = &vlv_display_funcs; 8526 } else { 8527 dev_priv->display.funcs.display = &i9xx_display_funcs; 8528 } 8529 } 8530 8531 int intel_initial_commit(struct drm_device *dev) 8532 { 8533 struct drm_atomic_state *state = NULL; 8534 struct drm_modeset_acquire_ctx ctx; 8535 struct intel_crtc *crtc; 8536 int ret = 0; 8537 8538 state = drm_atomic_state_alloc(dev); 8539 if (!state) 8540 return -ENOMEM; 8541 8542 drm_modeset_acquire_init(&ctx, 0); 8543 8544 state->acquire_ctx = &ctx; 8545 to_intel_atomic_state(state)->internal = true; 8546 8547 retry: 8548 for_each_intel_crtc(dev, crtc) { 8549 struct intel_crtc_state *crtc_state = 8550 intel_atomic_get_crtc_state(state, crtc); 8551 8552 if (IS_ERR(crtc_state)) { 8553 ret = PTR_ERR(crtc_state); 8554 goto out; 8555 } 8556 8557 if (crtc_state->hw.active) { 8558 struct intel_encoder *encoder; 8559 8560 ret = drm_atomic_add_affected_planes(state, &crtc->base); 8561 if (ret) 8562 goto out; 8563 8564 /* 8565 * FIXME hack to force a LUT update to avoid the 8566 * plane update forcing the pipe gamma on without 8567 * having a proper LUT loaded. Remove once we 8568 * have readout for pipe gamma enable. 8569 */ 8570 crtc_state->uapi.color_mgmt_changed = true; 8571 8572 for_each_intel_encoder_mask(dev, encoder, 8573 crtc_state->uapi.encoder_mask) { 8574 if (encoder->initial_fastset_check && 8575 !encoder->initial_fastset_check(encoder, crtc_state)) { 8576 ret = drm_atomic_add_affected_connectors(state, 8577 &crtc->base); 8578 if (ret) 8579 goto out; 8580 } 8581 } 8582 } 8583 } 8584 8585 ret = drm_atomic_commit(state); 8586 8587 out: 8588 if (ret == -EDEADLK) { 8589 drm_atomic_state_clear(state); 8590 drm_modeset_backoff(&ctx); 8591 goto retry; 8592 } 8593 8594 drm_atomic_state_put(state); 8595 8596 drm_modeset_drop_locks(&ctx); 8597 drm_modeset_acquire_fini(&ctx); 8598 8599 return ret; 8600 } 8601 8602 void i830_enable_pipe(struct intel_display *display, enum pipe pipe) 8603 { 8604 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 8605 enum transcoder cpu_transcoder = (enum transcoder)pipe; 8606 /* 640x480@60Hz, ~25175 kHz */ 8607 struct dpll clock = { 8608 .m1 = 18, 8609 .m2 = 7, 8610 .p1 = 13, 8611 .p2 = 4, 8612 .n = 2, 8613 }; 8614 u32 dpll, fp; 8615 int i; 8616 8617 drm_WARN_ON(display->drm, 8618 i9xx_calc_dpll_params(48000, &clock) != 25154); 8619 8620 drm_dbg_kms(display->drm, 8621 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n", 8622 pipe_name(pipe), clock.vco, clock.dot); 8623 8624 fp = i9xx_dpll_compute_fp(&clock); 8625 dpll = DPLL_DVO_2X_MODE | 8626 DPLL_VGA_MODE_DIS | 8627 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | 8628 PLL_P2_DIVIDE_BY_4 | 8629 PLL_REF_INPUT_DREFCLK | 8630 DPLL_VCO_ENABLE; 8631 8632 intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder), 8633 HACTIVE(640 - 1) | HTOTAL(800 - 1)); 8634 intel_de_write(display, TRANS_HBLANK(display, cpu_transcoder), 8635 HBLANK_START(640 - 1) | HBLANK_END(800 - 1)); 8636 intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder), 8637 HSYNC_START(656 - 1) | HSYNC_END(752 - 1)); 8638 intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder), 8639 VACTIVE(480 - 1) | VTOTAL(525 - 1)); 8640 intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder), 8641 VBLANK_START(480 - 1) | VBLANK_END(525 - 
1)); 8642 intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder), 8643 VSYNC_START(490 - 1) | VSYNC_END(492 - 1)); 8644 intel_de_write(display, PIPESRC(display, pipe), 8645 PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1)); 8646 8647 intel_de_write(display, FP0(pipe), fp); 8648 intel_de_write(display, FP1(pipe), fp); 8649 8650 /* 8651 * Apparently we need to have VGA mode enabled prior to changing 8652 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 8653 * dividers, even though the register value does change. 8654 */ 8655 intel_de_write(display, DPLL(display, pipe), 8656 dpll & ~DPLL_VGA_MODE_DIS); 8657 intel_de_write(display, DPLL(display, pipe), dpll); 8658 8659 /* Wait for the clocks to stabilize. */ 8660 intel_de_posting_read(display, DPLL(display, pipe)); 8661 udelay(150); 8662 8663 /* The pixel multiplier can only be updated once the 8664 * DPLL is enabled and the clocks are stable. 8665 * 8666 * So write it again. 8667 */ 8668 intel_de_write(display, DPLL(display, pipe), dpll); 8669 8670 /* We do this three times for luck */ 8671 for (i = 0; i < 3 ; i++) { 8672 intel_de_write(display, DPLL(display, pipe), dpll); 8673 intel_de_posting_read(display, DPLL(display, pipe)); 8674 udelay(150); /* wait for warmup */ 8675 } 8676 8677 intel_de_write(display, TRANSCONF(display, pipe), TRANSCONF_ENABLE); 8678 intel_de_posting_read(display, TRANSCONF(display, pipe)); 8679 8680 intel_wait_for_pipe_scanline_moving(crtc); 8681 } 8682 8683 void i830_disable_pipe(struct intel_display *display, enum pipe pipe) 8684 { 8685 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 8686 8687 drm_dbg_kms(display->drm, "disabling pipe %c due to force quirk\n", 8688 pipe_name(pipe)); 8689 8690 drm_WARN_ON(display->drm, 8691 intel_de_read(display, DSPCNTR(display, PLANE_A)) & DISP_ENABLE); 8692 drm_WARN_ON(display->drm, 8693 intel_de_read(display, DSPCNTR(display, PLANE_B)) & DISP_ENABLE); 8694 drm_WARN_ON(display->drm, 8695 intel_de_read(display, DSPCNTR(display, PLANE_C)) & DISP_ENABLE); 8696 drm_WARN_ON(display->drm, 8697 intel_de_read(display, CURCNTR(display, PIPE_A)) & MCURSOR_MODE_MASK); 8698 drm_WARN_ON(display->drm, 8699 intel_de_read(display, CURCNTR(display, PIPE_B)) & MCURSOR_MODE_MASK); 8700 8701 intel_de_write(display, TRANSCONF(display, pipe), 0); 8702 intel_de_posting_read(display, TRANSCONF(display, pipe)); 8703 8704 intel_wait_for_pipe_scanline_stopped(crtc); 8705 8706 intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS); 8707 intel_de_posting_read(display, DPLL(display, pipe)); 8708 } 8709 8710 void intel_hpd_poll_fini(struct drm_i915_private *i915) 8711 { 8712 struct intel_connector *connector; 8713 struct drm_connector_list_iter conn_iter; 8714 8715 /* Kill all the work that may have been queued by hpd. */ 8716 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 8717 for_each_intel_connector_iter(connector, &conn_iter) { 8718 if (connector->modeset_retry_work.func && 8719 cancel_work_sync(&connector->modeset_retry_work)) 8720 drm_connector_put(&connector->base); 8721 if (connector->hdcp.shim) { 8722 cancel_delayed_work_sync(&connector->hdcp.check_work); 8723 cancel_work_sync(&connector->hdcp.prop_work); 8724 } 8725 } 8726 drm_connector_list_iter_end(&conn_iter); 8727 } 8728 8729 bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915) 8730 { 8731 return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915); 8732 } 8733