/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dma-resv.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bo.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_cursor_regs.h"
#include "intel_cx0_phy.h"
#include "intel_cursor.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
105 #include "intel_pch_refclk.h" 106 #include "intel_pcode.h" 107 #include "intel_pipe_crc.h" 108 #include "intel_plane_initial.h" 109 #include "intel_pmdemand.h" 110 #include "intel_pps.h" 111 #include "intel_psr.h" 112 #include "intel_psr_regs.h" 113 #include "intel_sdvo.h" 114 #include "intel_snps_phy.h" 115 #include "intel_tc.h" 116 #include "intel_tdf.h" 117 #include "intel_tv.h" 118 #include "intel_vblank.h" 119 #include "intel_vdsc.h" 120 #include "intel_vdsc_regs.h" 121 #include "intel_vga.h" 122 #include "intel_vrr.h" 123 #include "intel_wm.h" 124 #include "skl_scaler.h" 125 #include "skl_universal_plane.h" 126 #include "skl_universal_plane_regs.h" 127 #include "skl_watermark.h" 128 #include "vlv_dpio_phy_regs.h" 129 #include "vlv_dsi.h" 130 #include "vlv_dsi_pll.h" 131 #include "vlv_dsi_regs.h" 132 #include "vlv_sideband.h" 133 134 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); 135 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); 136 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); 137 static void bdw_set_pipe_misc(struct intel_dsb *dsb, 138 const struct intel_crtc_state *crtc_state); 139 140 /* returns HPLL frequency in kHz */ 141 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) 142 { 143 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 144 145 /* Obtain SKU information */ 146 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 147 CCK_FUSE_HPLL_FREQ_MASK; 148 149 return vco_freq[hpll_freq] * 1000; 150 } 151 152 int vlv_get_cck_clock(struct drm_i915_private *dev_priv, 153 const char *name, u32 reg, int ref_freq) 154 { 155 u32 val; 156 int divider; 157 158 val = vlv_cck_read(dev_priv, reg); 159 divider = val & CCK_FREQUENCY_VALUES; 160 161 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) != 162 (divider << CCK_FREQUENCY_STATUS_SHIFT), 163 "%s change in progress\n", name); 164 165 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); 166 } 167 168 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 169 const char *name, u32 reg) 170 { 171 int hpll; 172 173 vlv_cck_get(dev_priv); 174 175 if (dev_priv->hpll_freq == 0) 176 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv); 177 178 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq); 179 180 vlv_cck_put(dev_priv); 181 182 return hpll; 183 } 184 185 void intel_update_czclk(struct drm_i915_private *dev_priv) 186 { 187 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) 188 return; 189 190 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", 191 CCK_CZ_CLOCK_CONTROL); 192 193 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n", 194 dev_priv->czclk_freq); 195 } 196 197 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state) 198 { 199 return (crtc_state->active_planes & 200 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0; 201 } 202 203 /* WA Display #0827: Gen9:all */ 204 static void 205 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) 206 { 207 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 208 DUPS1_GATING_DIS | DUPS2_GATING_DIS, 209 enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0); 210 } 211 212 /* Wa_2006604312:icl,ehl */ 213 static void 214 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 215 bool enable) 216 { 217 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 218 DPFR_GATING_DIS, 219 enable ? 

void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DUPS1_GATING_DIS | DUPS2_GATING_DIS,
		     enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DPFR_GATING_DIS,
		     enable ? DPFR_GATING_DIS : 0);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

static enum pipe joiner_primary_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->joiner_pipes) - 1;
}

/*
 * The following helper functions, despite being named for bigjoiner,
 * are applicable to both bigjoiner and uncompressed joiner configurations.
 */
static bool is_bigjoiner(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->joiner_pipes) >= 2;
}

static u8 bigjoiner_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!is_bigjoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b01010101 << joiner_primary_pipe(crtc_state));
}

static unsigned int bigjoiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!is_bigjoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b10101010 << joiner_primary_pipe(crtc_state));
}

bool intel_crtc_is_bigjoiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return false;

	return BIT(crtc->pipe) & bigjoiner_primary_pipes(crtc_state);
}

bool intel_crtc_is_bigjoiner_secondary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return false;

	return BIT(crtc->pipe) & bigjoiner_secondary_pipes(crtc_state);
}

u8 _intel_modeset_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return BIT(crtc->pipe);

	return bigjoiner_primary_pipes(crtc_state);
}

u8 _intel_modeset_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	return bigjoiner_secondary_pipes(crtc_state);
}

bool intel_crtc_is_ultrajoiner(const struct intel_crtc_state *crtc_state)
{
	return intel_crtc_num_joined_pipes(crtc_state) >= 4;
}

static u8 ultrajoiner_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!intel_crtc_is_ultrajoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b00010001 << joiner_primary_pipe(crtc_state));
}

bool intel_crtc_is_ultrajoiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return intel_crtc_is_ultrajoiner(crtc_state) &&
	       BIT(crtc->pipe) & ultrajoiner_primary_pipes(crtc_state);
}
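
/*
 * Worked example (illustrative values): with joiner_pipes = 0b1111
 * (pipes A-D joined, primary pipe A, so joiner_primary_pipe() == 0),
 * the mask helpers above decompose as:
 *   bigjoiner_primary_pipes()   -> 0b1111 & 0b01010101 = 0b0101 (A, C)
 *   bigjoiner_secondary_pipes() -> 0b1111 & 0b10101010 = 0b1010 (B, D)
 *   ultrajoiner_primary_pipes() -> 0b1111 & 0b00010001 = 0b0001 (A only)
 */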

/*
 * The ultrajoiner enable bit doesn't seem to follow primary/secondary
 * logic or any other logic, so let's just add a helper function to
 * at least hide this hassle.
 */
static u8 ultrajoiner_enable_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!intel_crtc_is_ultrajoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b01110111 << joiner_primary_pipe(crtc_state));
}

bool intel_crtc_ultrajoiner_enable_needed(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return intel_crtc_is_ultrajoiner(crtc_state) &&
	       BIT(crtc->pipe) & ultrajoiner_enable_pipes(crtc_state);
}

u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->joiner_pipes)
		return crtc_state->joiner_pipes & ~BIT(joiner_primary_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_joiner_secondary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe != joiner_primary_pipe(crtc_state);
}

bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe == joiner_primary_pipe(crtc_state);
}

int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(intel_crtc_joined_pipe_mask(crtc_state));
}

u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return BIT(crtc->pipe) | crtc_state->joiner_pipes;
}

struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (intel_crtc_is_joiner_secondary(crtc_state))
		return intel_crtc_for_pipe(display, joiner_primary_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
					    TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	struct intel_display *display = &dev_priv->display;
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv,
					TRANSCONF(dev_priv, cpu_transcoder));
		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "transcoder %s assertion failure (expected %s, current %s)\n",
				 transcoder_name(cpu_transcoder), str_on_off(state),
				 str_on_off(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	struct intel_display *display = to_intel_display(plane->base.dev);
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "%s assertion failure (expected %s, current %s)\n",
				 plane->base.name, str_on_off(state),
				 str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void vlv_wait_port_ready(struct intel_display *display,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(display, 0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(display, 0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait(display, dpll_reg, port_mask, expected_mask, 1000))
		drm_WARN(display->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(display, dpll_reg) & port_mask,
			 expected_mask);
}
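
/*
 * Note on the PORT_C case above: both PORT_B and PORT_C are polled in
 * the same DPLL(0) register (only PORT_D uses DPIO_PHY_STATUS),
 * presumably because the PORT_C ready field sits four bit positions
 * above PORT_B's. The expected_mask <<= 4 lines the caller's expected
 * value up with that higher field.
 */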

void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(dev_priv, pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	if (DISPLAY_VER(dev_priv) >= 14) {
		u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
		u32 set = 0;

		if (DISPLAY_VER(dev_priv) == 14)
			set |= DP_FEC_BS_JITTER_WA;

		intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
			     clear, set);
	}

	val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(dev_priv) >= 13 &&
	    new_crtc_state->dsc.compression_enable) {
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
		val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
				      TRANSCONF_PIXEL_COUNT_SCALING_X4);
	}

	intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
		       val | TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on().
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(old_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~TRANSCONF_ENABLE;

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(dev_priv) >= 13 &&
	    old_crtc_state->dsc.compression_enable)
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;

	intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int plane_size;

		if (rem_info->plane[i].linear)
			plane_size = rem_info->plane[i].size;
		else
			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;

		if (plane_size == 0)
			continue;

		if (rem_info->plane_alignment)
			size = ALIGN(size, rem_info->plane_alignment);

		size += plane_size;
	}

	return size;
}

bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc && !plane_state->no_fbc_reason &&
		 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotation isn't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;

	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}
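
/*
 * Worked example (illustrative numbers): for a 32bpp color plane
 * (cpp = 4) with a 16384 byte mapping stride, intel_fb_xy_to_linear()
 * maps (x, y) = (2, 3) to 3 * 16384 + 2 * 4 = 49160 bytes from the
 * start of the gtt mapping.
 */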

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * if pipe A happens to be disabled, use the
	 * first pipe from pipe_mask instead.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(NULL, plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}

unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}

static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA #1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if ((DISPLAY_VER(dev_priv) >= 13) && (DISPLAY_VER(dev_priv) < 30))
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2(dev_priv))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *primary_crtc;
	int num_encoders = 0;
	int i;

	primary_crtc = intel_primary_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &primary_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(state->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(primary_crtc->pipe));

	return encoder;
}

static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/*
	 * Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
			  PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
			  PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/*
	 * Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(dev_priv) == 9)
		return true;

	return false;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N stretch max must be programmed to 11b (x1)
		 *  when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}

static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}

static void intel_encoders_audio_enable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_enable)
			encoder->audio_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_audio_disable(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_disable)
			encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
	}
}

#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))
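
/*
 * Example (illustrative): is_enabling(has_audio, old, new) is true
 * either when the feature goes from off to on across the commit, or
 * when the new state has the feature and a full modeset is needed
 * (in which case everything is re-enabled from scratch regardless of
 * the old state). is_disabling() is the mirror image for the disable
 * direction.
 */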

static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline ||
		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
}

static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
				const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->cmrr.cmrr_m != new_crtc_state->cmrr.cmrr_m ||
		old_crtc_state->cmrr.cmrr_n != new_crtc_state->cmrr.cmrr_n;
}

static bool intel_crtc_vrr_enabling(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
			   const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

#undef is_disabling
#undef is_enabling

static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_post_plane_update(state, crtc);

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);

	if (audio_enabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_enable(state, crtc);
}

static void intel_post_plane_update_after_readout(struct intel_atomic_state *state,
						  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
	hsw_ips_post_update(state, crtc);

	/*
	 * Activate DRRS after state readout to avoid
	 * dp_m_n vs. dp_m2_n2 confusion on BDW+.
	 */
	intel_drrs_activate(new_crtc_state);
}

static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}

static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
				       ~new_crtc_state->async_flip_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_toggle_wa &&
		    plane->pipe == crtc->pipe &&
		    disable_async_flip_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			intel_plane_async_flip(NULL, plane,
					       old_crtc_state, old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}

static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (intel_crtc_vrr_disabling(state, crtc)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	if (audio_disabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_disable(state, crtc);

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (!HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(NULL, plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}

static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports
	 * after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (i915->display.dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}
}

static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
	}
}

static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}

static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	ilk_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		ilk_pch_pre_enable(state, crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_modeset(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		intel_wait_for_pipe_scanline_moving(crtc);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/* Display WA #1180: WaDisableScalarClockGating: glk */
static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled;
}

static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;

	intel_de_rmw(i915, CLKGATE_DIS_PSL(crtc->pipe),
		     mask, enable ? mask : 0);
}
mask : 0); 1734 } 1735 1736 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state) 1737 { 1738 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1739 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1740 1741 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe), 1742 HSW_LINETIME(crtc_state->linetime) | 1743 HSW_IPS_LINETIME(crtc_state->ips_linetime)); 1744 } 1745 1746 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) 1747 { 1748 struct intel_display *display = to_intel_display(crtc_state); 1749 1750 intel_de_rmw(display, CHICKEN_TRANS(display, crtc_state->cpu_transcoder), 1751 HSW_FRAME_START_DELAY_MASK, 1752 HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1)); 1753 } 1754 1755 static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 1756 { 1757 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1758 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1759 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 1760 1761 if (crtc_state->has_pch_encoder) { 1762 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1763 &crtc_state->fdi_m_n); 1764 } else if (intel_crtc_has_dp_encoder(crtc_state)) { 1765 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1766 &crtc_state->dp_m_n); 1767 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 1768 &crtc_state->dp_m2_n2); 1769 } 1770 1771 intel_set_transcoder_timings(crtc_state); 1772 if (HAS_VRR(dev_priv)) 1773 intel_vrr_set_transcoder_timings(crtc_state); 1774 1775 if (cpu_transcoder != TRANSCODER_EDP) 1776 intel_de_write(dev_priv, TRANS_MULT(dev_priv, cpu_transcoder), 1777 crtc_state->pixel_multiplier - 1); 1778 1779 hsw_set_frame_start_delay(crtc_state); 1780 1781 hsw_set_transconf(crtc_state); 1782 } 1783 1784 static void hsw_crtc_enable(struct intel_atomic_state *state, 1785 struct intel_crtc *crtc) 1786 { 1787 struct intel_display *display = to_intel_display(state); 1788 const struct intel_crtc_state *new_crtc_state = 1789 intel_atomic_get_new_crtc_state(state, crtc); 1790 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1791 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 1792 struct intel_crtc *pipe_crtc; 1793 int i; 1794 1795 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 1796 return; 1797 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) 1798 intel_dmc_enable_pipe(display, pipe_crtc->pipe); 1799 1800 intel_encoders_pre_pll_enable(state, crtc); 1801 1802 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { 1803 const struct intel_crtc_state *pipe_crtc_state = 1804 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1805 1806 if (pipe_crtc_state->shared_dpll) 1807 intel_enable_shared_dpll(pipe_crtc_state); 1808 } 1809 1810 intel_encoders_pre_enable(state, crtc); 1811 1812 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { 1813 const struct intel_crtc_state *pipe_crtc_state = 1814 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1815 1816 intel_dsc_enable(pipe_crtc_state); 1817 1818 if (HAS_UNCOMPRESSED_JOINER(dev_priv)) 1819 intel_uncompressed_joiner_enable(pipe_crtc_state); 1820 1821 intel_set_pipe_src_size(pipe_crtc_state); 1822 1823 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 1824 bdw_set_pipe_misc(NULL, pipe_crtc_state); 1825 } 1826 1827 if (!transcoder_is_dsi(cpu_transcoder)) 1828 hsw_configure_cpu_transcoder(new_crtc_state); 1829 1830 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, 
new_crtc_state, i) { 1831 const struct intel_crtc_state *pipe_crtc_state = 1832 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1833 1834 pipe_crtc->active = true; 1835 1836 if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) 1837 glk_pipe_scaler_clock_gating_wa(pipe_crtc, true); 1838 1839 if (DISPLAY_VER(dev_priv) >= 9) 1840 skl_pfit_enable(pipe_crtc_state); 1841 else 1842 ilk_pfit_enable(pipe_crtc_state); 1843 1844 /* 1845 * On ILK+ LUT must be loaded before the pipe is running but with 1846 * clocks enabled 1847 */ 1848 intel_color_modeset(pipe_crtc_state); 1849 1850 hsw_set_linetime_wm(pipe_crtc_state); 1851 1852 if (DISPLAY_VER(dev_priv) >= 11) 1853 icl_set_pipe_chicken(pipe_crtc_state); 1854 1855 intel_initial_watermarks(state, pipe_crtc); 1856 } 1857 1858 intel_encoders_enable(state, crtc); 1859 1860 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { 1861 const struct intel_crtc_state *pipe_crtc_state = 1862 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1863 enum pipe hsw_workaround_pipe; 1864 1865 if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) { 1866 intel_crtc_wait_for_next_vblank(pipe_crtc); 1867 glk_pipe_scaler_clock_gating_wa(pipe_crtc, false); 1868 } 1869 1870 /* 1871 * If we change the relative order between pipe/planes 1872 * enabling, we need to change the workaround. 1873 */ 1874 hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe; 1875 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 1876 struct intel_crtc *wa_crtc = 1877 intel_crtc_for_pipe(display, hsw_workaround_pipe); 1878 1879 intel_crtc_wait_for_next_vblank(wa_crtc); 1880 intel_crtc_wait_for_next_vblank(wa_crtc); 1881 } 1882 } 1883 } 1884 1885 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) 1886 { 1887 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 1888 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1889 enum pipe pipe = crtc->pipe; 1890 1891 /* To avoid upsetting the power well on haswell only disable the pfit if 1892 * it's in use. The hw state code will make sure we get this right. */ 1893 if (!old_crtc_state->pch_pfit.enabled) 1894 return; 1895 1896 intel_de_write_fw(dev_priv, PF_CTL(pipe), 0); 1897 intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0); 1898 intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0); 1899 } 1900 1901 static void ilk_crtc_disable(struct intel_atomic_state *state, 1902 struct intel_crtc *crtc) 1903 { 1904 const struct intel_crtc_state *old_crtc_state = 1905 intel_atomic_get_old_crtc_state(state, crtc); 1906 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1907 enum pipe pipe = crtc->pipe; 1908 1909 /* 1910 * Sometimes spurious CPU pipe underruns happen when the 1911 * pipe is already disabled, but FDI RX/TX is still enabled. 1912 * Happens at least with VGA+HDMI cloning. Suppress them. 
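 *
 * Reporting is re-enabled below once the transcoder and the
 * FDI/PCH teardown have completed.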
1913 */ 1914 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 1915 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 1916 1917 intel_encoders_disable(state, crtc); 1918 1919 intel_crtc_vblank_off(old_crtc_state); 1920 1921 intel_disable_transcoder(old_crtc_state); 1922 1923 ilk_pfit_disable(old_crtc_state); 1924 1925 if (old_crtc_state->has_pch_encoder) 1926 ilk_pch_disable(state, crtc); 1927 1928 intel_encoders_post_disable(state, crtc); 1929 1930 if (old_crtc_state->has_pch_encoder) 1931 ilk_pch_post_disable(state, crtc); 1932 1933 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 1934 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 1935 1936 intel_disable_shared_dpll(old_crtc_state); 1937 } 1938 1939 static void hsw_crtc_disable(struct intel_atomic_state *state, 1940 struct intel_crtc *crtc) 1941 { 1942 struct intel_display *display = to_intel_display(state); 1943 const struct intel_crtc_state *old_crtc_state = 1944 intel_atomic_get_old_crtc_state(state, crtc); 1945 struct intel_crtc *pipe_crtc; 1946 int i; 1947 1948 /* 1949 * FIXME collapse everything to one hook. 1950 * Need care with mst->ddi interactions. 1951 */ 1952 intel_encoders_disable(state, crtc); 1953 intel_encoders_post_disable(state, crtc); 1954 1955 for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { 1956 const struct intel_crtc_state *old_pipe_crtc_state = 1957 intel_atomic_get_old_crtc_state(state, pipe_crtc); 1958 1959 intel_disable_shared_dpll(old_pipe_crtc_state); 1960 } 1961 1962 intel_encoders_post_pll_disable(state, crtc); 1963 1964 for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) 1965 intel_dmc_disable_pipe(display, pipe_crtc->pipe); 1966 } 1967 1968 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) 1969 { 1970 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1971 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1972 1973 if (!crtc_state->gmch_pfit.control) 1974 return; 1975 1976 /* 1977 * The panel fitter should only be adjusted whilst the pipe is disabled, 1978 * according to register description and PRM. 1979 */ 1980 drm_WARN_ON(&dev_priv->drm, 1981 intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)) & PFIT_ENABLE); 1982 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); 1983 1984 intel_de_write(dev_priv, PFIT_PGM_RATIOS(dev_priv), 1985 crtc_state->gmch_pfit.pgm_ratios); 1986 intel_de_write(dev_priv, PFIT_CONTROL(dev_priv), 1987 crtc_state->gmch_pfit.control); 1988 1989 /* Border color in case we don't scale up to the full screen. Black by 1990 * default, change to something else for debugging. */ 1991 intel_de_write(dev_priv, BCLRPAT(dev_priv, crtc->pipe), 0); 1992 } 1993 1994 /* Prefer intel_encoder_is_combo() */ 1995 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) 1996 { 1997 if (phy == PHY_NONE) 1998 return false; 1999 else if (IS_ALDERLAKE_S(dev_priv)) 2000 return phy <= PHY_E; 2001 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) 2002 return phy <= PHY_D; 2003 else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) 2004 return phy <= PHY_C; 2005 else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12)) 2006 return phy <= PHY_B; 2007 else 2008 /* 2009 * DG2 outputs labelled as "combo PHY" in the bspec use 2010 * SNPS PHYs with completely different programming, 2011 * hence we always return false here. 
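 * See intel_phy_is_snps() just below for those.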
2012 */ 2013 return false; 2014 } 2015 2016 /* Prefer intel_encoder_is_tc() */ 2017 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) 2018 { 2019 /* 2020 * Discrete GPU PHYs are not attached to FIAs to support TC 2021 * subsystem legacy or non-legacy modes, and only support native DP/HDMI. 2022 */ 2023 if (IS_DGFX(dev_priv)) 2024 return false; 2025 2026 if (DISPLAY_VER(dev_priv) >= 13) 2027 return phy >= PHY_F && phy <= PHY_I; 2028 else if (IS_TIGERLAKE(dev_priv)) 2029 return phy >= PHY_D && phy <= PHY_I; 2030 else if (IS_ICELAKE(dev_priv)) 2031 return phy >= PHY_C && phy <= PHY_F; 2032 2033 return false; 2034 } 2035 2036 /* Prefer intel_encoder_is_snps() */ 2037 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy) 2038 { 2039 /* 2040 * For DG2, and for DG2 only, all four "combo" ports and the TC1 port 2041 * (PHY E) use Synopsys PHYs. See intel_phy_is_tc(). 2042 */ 2043 return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E; 2044 } 2045 2046 /* Prefer intel_encoder_to_phy() */ 2047 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) 2048 { 2049 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD) 2050 return PHY_D + port - PORT_D_XELPD; 2051 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1) 2052 return PHY_F + port - PORT_TC1; 2053 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1) 2054 return PHY_B + port - PORT_TC1; 2055 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1) 2056 return PHY_C + port - PORT_TC1; 2057 else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && 2058 port == PORT_D) 2059 return PHY_A; 2060 2061 return PHY_A + port - PORT_A; 2062 } 2063 2064 /* Prefer intel_encoder_to_tc() */ 2065 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) 2066 { 2067 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) 2068 return TC_PORT_NONE; 2069 2070 if (DISPLAY_VER(dev_priv) >= 12) 2071 return TC_PORT_1 + port - PORT_TC1; 2072 else 2073 return TC_PORT_1 + port - PORT_C; 2074 } 2075 2076 enum phy intel_encoder_to_phy(struct intel_encoder *encoder) 2077 { 2078 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2079 2080 return intel_port_to_phy(i915, encoder->port); 2081 } 2082 2083 bool intel_encoder_is_combo(struct intel_encoder *encoder) 2084 { 2085 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2086 2087 return intel_phy_is_combo(i915, intel_encoder_to_phy(encoder)); 2088 } 2089 2090 bool intel_encoder_is_snps(struct intel_encoder *encoder) 2091 { 2092 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2093 2094 return intel_phy_is_snps(i915, intel_encoder_to_phy(encoder)); 2095 } 2096 2097 bool intel_encoder_is_tc(struct intel_encoder *encoder) 2098 { 2099 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2100 2101 return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder)); 2102 } 2103 2104 enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder) 2105 { 2106 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2107 2108 return intel_port_to_tc(i915, encoder->port); 2109 } 2110 2111 enum intel_display_power_domain 2112 intel_aux_power_domain(struct intel_digital_port *dig_port) 2113 { 2114 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 2115 2116 if (intel_tc_port_in_tbt_alt_mode(dig_port)) 2117 return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch); 2118 2119 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); 2120 } 2121 2122 static void
get_crtc_power_domains(struct intel_crtc_state *crtc_state, 2123 struct intel_power_domain_mask *mask) 2124 { 2125 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2126 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2127 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2128 struct drm_encoder *encoder; 2129 enum pipe pipe = crtc->pipe; 2130 2131 bitmap_zero(mask->bits, POWER_DOMAIN_NUM); 2132 2133 if (!crtc_state->hw.active) 2134 return; 2135 2136 set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits); 2137 set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits); 2138 if (crtc_state->pch_pfit.enabled || 2139 crtc_state->pch_pfit.force_thru) 2140 set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits); 2141 2142 drm_for_each_encoder_mask(encoder, &dev_priv->drm, 2143 crtc_state->uapi.encoder_mask) { 2144 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 2145 2146 set_bit(intel_encoder->power_domain, mask->bits); 2147 } 2148 2149 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 2150 set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits); 2151 2152 if (crtc_state->shared_dpll) 2153 set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits); 2154 2155 if (crtc_state->dsc.compression_enable) 2156 set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits); 2157 } 2158 2159 void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state, 2160 struct intel_power_domain_mask *old_domains) 2161 { 2162 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2163 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2164 enum intel_display_power_domain domain; 2165 struct intel_power_domain_mask domains, new_domains; 2166 2167 get_crtc_power_domains(crtc_state, &domains); 2168 2169 bitmap_andnot(new_domains.bits, 2170 domains.bits, 2171 crtc->enabled_power_domains.mask.bits, 2172 POWER_DOMAIN_NUM); 2173 bitmap_andnot(old_domains->bits, 2174 crtc->enabled_power_domains.mask.bits, 2175 domains.bits, 2176 POWER_DOMAIN_NUM); 2177 2178 for_each_power_domain(domain, &new_domains) 2179 intel_display_power_get_in_set(dev_priv, 2180 &crtc->enabled_power_domains, 2181 domain); 2182 } 2183 2184 void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc, 2185 struct intel_power_domain_mask *domains) 2186 { 2187 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev), 2188 &crtc->enabled_power_domains, 2189 domains); 2190 } 2191 2192 static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 2193 { 2194 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2195 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2196 2197 if (intel_crtc_has_dp_encoder(crtc_state)) { 2198 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 2199 &crtc_state->dp_m_n); 2200 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 2201 &crtc_state->dp_m2_n2); 2202 } 2203 2204 intel_set_transcoder_timings(crtc_state); 2205 2206 i9xx_set_pipeconf(crtc_state); 2207 } 2208 2209 static void valleyview_crtc_enable(struct intel_atomic_state *state, 2210 struct intel_crtc *crtc) 2211 { 2212 const struct intel_crtc_state *new_crtc_state = 2213 intel_atomic_get_new_crtc_state(state, crtc); 2214 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2215 enum pipe pipe = crtc->pipe; 2216 2217 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 2218 return; 2219 2220 i9xx_configure_cpu_transcoder(new_crtc_state); 2221 2222 intel_set_pipe_src_size(new_crtc_state); 2223 2224 intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0); 
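/*
 * Pipe B on CHV has a pipe blender: use the legacy blend mode
 * with a black canvas colour, so its output matches the other
 * pipes.
 */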
2225 2226 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 2227 intel_de_write(dev_priv, CHV_BLEND(dev_priv, pipe), 2228 CHV_BLEND_LEGACY); 2229 intel_de_write(dev_priv, CHV_CANVAS(dev_priv, pipe), 0); 2230 } 2231 2232 crtc->active = true; 2233 2234 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 2235 2236 intel_encoders_pre_pll_enable(state, crtc); 2237 2238 if (IS_CHERRYVIEW(dev_priv)) 2239 chv_enable_pll(new_crtc_state); 2240 else 2241 vlv_enable_pll(new_crtc_state); 2242 2243 intel_encoders_pre_enable(state, crtc); 2244 2245 i9xx_pfit_enable(new_crtc_state); 2246 2247 intel_color_modeset(new_crtc_state); 2248 2249 intel_initial_watermarks(state, crtc); 2250 intel_enable_transcoder(new_crtc_state); 2251 2252 intel_crtc_vblank_on(new_crtc_state); 2253 2254 intel_encoders_enable(state, crtc); 2255 } 2256 2257 static void i9xx_crtc_enable(struct intel_atomic_state *state, 2258 struct intel_crtc *crtc) 2259 { 2260 const struct intel_crtc_state *new_crtc_state = 2261 intel_atomic_get_new_crtc_state(state, crtc); 2262 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2263 enum pipe pipe = crtc->pipe; 2264 2265 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 2266 return; 2267 2268 i9xx_configure_cpu_transcoder(new_crtc_state); 2269 2270 intel_set_pipe_src_size(new_crtc_state); 2271 2272 crtc->active = true; 2273 2274 if (DISPLAY_VER(dev_priv) != 2) 2275 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 2276 2277 intel_encoders_pre_enable(state, crtc); 2278 2279 i9xx_enable_pll(new_crtc_state); 2280 2281 i9xx_pfit_enable(new_crtc_state); 2282 2283 intel_color_modeset(new_crtc_state); 2284 2285 if (!intel_initial_watermarks(state, crtc)) 2286 intel_update_watermarks(dev_priv); 2287 intel_enable_transcoder(new_crtc_state); 2288 2289 intel_crtc_vblank_on(new_crtc_state); 2290 2291 intel_encoders_enable(state, crtc); 2292 2293 /* prevents spurious underruns */ 2294 if (DISPLAY_VER(dev_priv) == 2) 2295 intel_crtc_wait_for_next_vblank(crtc); 2296 } 2297 2298 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) 2299 { 2300 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 2301 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2302 2303 if (!old_crtc_state->gmch_pfit.control) 2304 return; 2305 2306 assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder); 2307 2308 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n", 2309 intel_de_read(dev_priv, PFIT_CONTROL(dev_priv))); 2310 intel_de_write(dev_priv, PFIT_CONTROL(dev_priv), 0); 2311 } 2312 2313 static void i9xx_crtc_disable(struct intel_atomic_state *state, 2314 struct intel_crtc *crtc) 2315 { 2316 struct intel_display *display = to_intel_display(state); 2317 struct drm_i915_private *dev_priv = to_i915(display->drm); 2318 struct intel_crtc_state *old_crtc_state = 2319 intel_atomic_get_old_crtc_state(state, crtc); 2320 enum pipe pipe = crtc->pipe; 2321 2322 /* 2323 * On gen2 planes are double buffered but the pipe isn't, so we must 2324 * wait for planes to fully turn off before disabling the pipe. 
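 * The extra vblank wait below gives the double buffered plane
 * disables time to latch.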
2325 */ 2326 if (DISPLAY_VER(dev_priv) == 2) 2327 intel_crtc_wait_for_next_vblank(crtc); 2328 2329 intel_encoders_disable(state, crtc); 2330 2331 intel_crtc_vblank_off(old_crtc_state); 2332 2333 intel_disable_transcoder(old_crtc_state); 2334 2335 i9xx_pfit_disable(old_crtc_state); 2336 2337 intel_encoders_post_disable(state, crtc); 2338 2339 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { 2340 if (IS_CHERRYVIEW(dev_priv)) 2341 chv_disable_pll(dev_priv, pipe); 2342 else if (IS_VALLEYVIEW(dev_priv)) 2343 vlv_disable_pll(dev_priv, pipe); 2344 else 2345 i9xx_disable_pll(old_crtc_state); 2346 } 2347 2348 intel_encoders_post_pll_disable(state, crtc); 2349 2350 if (DISPLAY_VER(dev_priv) != 2) 2351 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 2352 2353 if (!dev_priv->display.funcs.wm->initial_watermarks) 2354 intel_update_watermarks(dev_priv); 2355 2356 /* clock the pipe down to 640x480@60 to potentially save power */ 2357 if (IS_I830(dev_priv)) 2358 i830_enable_pipe(display, pipe); 2359 } 2360 2361 void intel_encoder_destroy(struct drm_encoder *encoder) 2362 { 2363 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 2364 2365 drm_encoder_cleanup(encoder); 2366 kfree(intel_encoder); 2367 } 2368 2369 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 2370 { 2371 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2372 2373 /* GDG double wide on either pipe, otherwise pipe A only */ 2374 return HAS_DOUBLE_WIDE(dev_priv) && 2375 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 2376 } 2377 2378 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state) 2379 { 2380 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock; 2381 struct drm_rect src; 2382 2383 /* 2384 * We only use IF-ID interlacing. If we ever use 2385 * PF-ID we'll need to adjust the pixel_rate here. 
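 * (PF-ID would mean progressive fetch for an interlaced display,
 * in which case the fetch rate would no longer match crtc_clock.)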
2386 */ 2387 2388 if (!crtc_state->pch_pfit.enabled) 2389 return pixel_rate; 2390 2391 drm_rect_init(&src, 0, 0, 2392 drm_rect_width(&crtc_state->pipe_src) << 16, 2393 drm_rect_height(&crtc_state->pipe_src) << 16); 2394 2395 return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst, 2396 pixel_rate); 2397 } 2398 2399 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode, 2400 const struct drm_display_mode *timings) 2401 { 2402 mode->hdisplay = timings->crtc_hdisplay; 2403 mode->htotal = timings->crtc_htotal; 2404 mode->hsync_start = timings->crtc_hsync_start; 2405 mode->hsync_end = timings->crtc_hsync_end; 2406 2407 mode->vdisplay = timings->crtc_vdisplay; 2408 mode->vtotal = timings->crtc_vtotal; 2409 mode->vsync_start = timings->crtc_vsync_start; 2410 mode->vsync_end = timings->crtc_vsync_end; 2411 2412 mode->flags = timings->flags; 2413 mode->type = DRM_MODE_TYPE_DRIVER; 2414 2415 mode->clock = timings->crtc_clock; 2416 2417 drm_mode_set_name(mode); 2418 } 2419 2420 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 2421 { 2422 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 2423 2424 if (HAS_GMCH(dev_priv)) 2425 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 2426 crtc_state->pixel_rate = 2427 crtc_state->hw.pipe_mode.crtc_clock; 2428 else 2429 crtc_state->pixel_rate = 2430 ilk_pipe_pixel_rate(crtc_state); 2431 } 2432 2433 static void intel_joiner_adjust_timings(const struct intel_crtc_state *crtc_state, 2434 struct drm_display_mode *mode) 2435 { 2436 int num_pipes = intel_crtc_num_joined_pipes(crtc_state); 2437 2438 if (num_pipes == 1) 2439 return; 2440 2441 mode->crtc_clock /= num_pipes; 2442 mode->crtc_hdisplay /= num_pipes; 2443 mode->crtc_hblank_start /= num_pipes; 2444 mode->crtc_hblank_end /= num_pipes; 2445 mode->crtc_hsync_start /= num_pipes; 2446 mode->crtc_hsync_end /= num_pipes; 2447 mode->crtc_htotal /= num_pipes; 2448 } 2449 2450 static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state, 2451 struct drm_display_mode *mode) 2452 { 2453 int overlap = crtc_state->splitter.pixel_overlap; 2454 int n = crtc_state->splitter.link_count; 2455 2456 if (!crtc_state->splitter.enable) 2457 return; 2458 2459 /* 2460 * eDP MSO uses segment timings from EDID for transcoder 2461 * timings, but full mode for everything else. 2462 * 2463 * h_full = (h_segment - pixel_overlap) * link_count 2464 */ 2465 mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n; 2466 mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n; 2467 mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n; 2468 mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n; 2469 mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n; 2470 mode->crtc_htotal = (mode->crtc_htotal - overlap) * n; 2471 mode->crtc_clock *= n; 2472 } 2473 2474 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state) 2475 { 2476 struct drm_display_mode *mode = &crtc_state->hw.mode; 2477 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; 2478 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2479 2480 /* 2481 * Start with the adjusted_mode crtc timings, which 2482 * have been filled with the transcoder timings. 
2483 */ 2484 drm_mode_copy(pipe_mode, adjusted_mode); 2485 2486 /* Expand MSO per-segment transcoder timings to full */ 2487 intel_splitter_adjust_timings(crtc_state, pipe_mode); 2488 2489 /* 2490 * We want the full numbers in adjusted_mode normal timings, 2491 * adjusted_mode crtc timings are left with the raw transcoder 2492 * timings. 2493 */ 2494 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode); 2495 2496 /* Populate the "user" mode with full numbers */ 2497 drm_mode_copy(mode, pipe_mode); 2498 intel_mode_from_crtc_timings(mode, mode); 2499 mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) * 2500 intel_crtc_num_joined_pipes(crtc_state); 2501 mode->vdisplay = drm_rect_height(&crtc_state->pipe_src); 2502 2503 /* Derive per-pipe timings in case joiner is used */ 2504 intel_joiner_adjust_timings(crtc_state, pipe_mode); 2505 intel_mode_from_crtc_timings(pipe_mode, pipe_mode); 2506 2507 intel_crtc_compute_pixel_rate(crtc_state); 2508 } 2509 2510 void intel_encoder_get_config(struct intel_encoder *encoder, 2511 struct intel_crtc_state *crtc_state) 2512 { 2513 encoder->get_config(encoder, crtc_state); 2514 2515 intel_crtc_readout_derived_state(crtc_state); 2516 } 2517 2518 static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state) 2519 { 2520 int num_pipes = intel_crtc_num_joined_pipes(crtc_state); 2521 int width, height; 2522 2523 if (num_pipes == 1) 2524 return; 2525 2526 width = drm_rect_width(&crtc_state->pipe_src); 2527 height = drm_rect_height(&crtc_state->pipe_src); 2528 2529 drm_rect_init(&crtc_state->pipe_src, 0, 0, 2530 width / num_pipes, height); 2531 } 2532 2533 static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state) 2534 { 2535 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2536 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2537 2538 intel_joiner_compute_pipe_src(crtc_state); 2539 2540 /* 2541 * Pipe horizontal size must be even in: 2542 * - DVO ganged mode 2543 * - LVDS dual channel mode 2544 * - Double wide pipe 2545 */ 2546 if (drm_rect_width(&crtc_state->pipe_src) & 1) { 2547 if (crtc_state->double_wide) { 2548 drm_dbg_kms(&i915->drm, 2549 "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n", 2550 crtc->base.base.id, crtc->base.name); 2551 return -EINVAL; 2552 } 2553 2554 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 2555 intel_is_dual_link_lvds(i915)) { 2556 drm_dbg_kms(&i915->drm, 2557 "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n", 2558 crtc->base.base.id, crtc->base.name); 2559 return -EINVAL; 2560 } 2561 } 2562 2563 return 0; 2564 } 2565 2566 static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) 2567 { 2568 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2569 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2570 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2571 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; 2572 int clock_limit = i915->display.cdclk.max_dotclk_freq; 2573 2574 /* 2575 * Start with the adjusted_mode crtc timings, which 2576 * have been filled with the transcoder timings. 
2577 */ 2578 drm_mode_copy(pipe_mode, adjusted_mode); 2579 2580 /* Expand MSO per-segment transcoder timings to full */ 2581 intel_splitter_adjust_timings(crtc_state, pipe_mode); 2582 2583 /* Derive per-pipe timings in case joiner is used */ 2584 intel_joiner_adjust_timings(crtc_state, pipe_mode); 2585 intel_mode_from_crtc_timings(pipe_mode, pipe_mode); 2586 2587 if (DISPLAY_VER(i915) < 4) { 2588 clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10; 2589 2590 /* 2591 * Enable double wide mode when the dot clock 2592 * is > 90% of the (display) core speed. 2593 */ 2594 if (intel_crtc_supports_double_wide(crtc) && 2595 pipe_mode->crtc_clock > clock_limit) { 2596 clock_limit = i915->display.cdclk.max_dotclk_freq; 2597 crtc_state->double_wide = true; 2598 } 2599 } 2600 2601 if (pipe_mode->crtc_clock > clock_limit) { 2602 drm_dbg_kms(&i915->drm, 2603 "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 2604 crtc->base.base.id, crtc->base.name, 2605 pipe_mode->crtc_clock, clock_limit, 2606 str_yes_no(crtc_state->double_wide)); 2607 return -EINVAL; 2608 } 2609 2610 return 0; 2611 } 2612 2613 static bool intel_crtc_needs_wa_14015401596(const struct intel_crtc_state *crtc_state) 2614 { 2615 struct intel_display *display = to_intel_display(crtc_state); 2616 2617 return intel_vrr_possible(crtc_state) && crtc_state->has_psr && 2618 IS_DISPLAY_VER(display, 13, 14); 2619 } 2620 2621 static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state) 2622 { 2623 struct intel_display *display = to_intel_display(crtc_state); 2624 int vblank_delay = 0; 2625 2626 if (!HAS_DSB(display)) 2627 return 0; 2628 2629 /* Wa_14015401596 */ 2630 if (intel_crtc_needs_wa_14015401596(crtc_state)) 2631 vblank_delay = max(vblank_delay, 1); 2632 2633 /* 2634 * Add a minimal vblank delay to make sure the push 2635 * doesn't race with the "wait for safe window" used 2636 * for frame completion with DSB. 
2637 */ 2638 if (intel_vrr_possible(crtc_state)) 2639 vblank_delay = max(vblank_delay, 1); 2640 2641 return vblank_delay; 2642 } 2643 2644 static int intel_crtc_compute_vblank_delay(struct intel_atomic_state *state, 2645 struct intel_crtc *crtc) 2646 { 2647 struct intel_display *display = to_intel_display(state); 2648 struct intel_crtc_state *crtc_state = 2649 intel_atomic_get_new_crtc_state(state, crtc); 2650 struct drm_display_mode *adjusted_mode = 2651 &crtc_state->hw.adjusted_mode; 2652 int vblank_delay, max_vblank_delay; 2653 2654 vblank_delay = intel_crtc_vblank_delay(crtc_state); 2655 max_vblank_delay = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start - 1; 2656 2657 if (vblank_delay > max_vblank_delay) { 2658 drm_dbg_kms(display->drm, "[CRTC:%d:%s] vblank delay (%d) exceeds max (%d)\n", 2659 crtc->base.base.id, crtc->base.name, vblank_delay, max_vblank_delay); 2660 return -EINVAL; 2661 } 2662 2663 adjusted_mode->crtc_vblank_start += vblank_delay; 2664 2665 return 0; 2666 } 2667 2668 static int intel_crtc_compute_config(struct intel_atomic_state *state, 2669 struct intel_crtc *crtc) 2670 { 2671 struct intel_crtc_state *crtc_state = 2672 intel_atomic_get_new_crtc_state(state, crtc); 2673 int ret; 2674 2675 ret = intel_crtc_compute_vblank_delay(state, crtc); 2676 if (ret) 2677 return ret; 2678 2679 ret = intel_dpll_crtc_compute_clock(state, crtc); 2680 if (ret) 2681 return ret; 2682 2683 ret = intel_crtc_compute_pipe_src(crtc_state); 2684 if (ret) 2685 return ret; 2686 2687 ret = intel_crtc_compute_pipe_mode(crtc_state); 2688 if (ret) 2689 return ret; 2690 2691 intel_crtc_compute_pixel_rate(crtc_state); 2692 2693 if (crtc_state->has_pch_encoder) 2694 return ilk_fdi_compute_config(crtc, crtc_state); 2695 2696 return 0; 2697 } 2698 2699 static void 2700 intel_reduce_m_n_ratio(u32 *num, u32 *den) 2701 { 2702 while (*num > DATA_LINK_M_N_MASK || 2703 *den > DATA_LINK_M_N_MASK) { 2704 *num >>= 1; 2705 *den >>= 1; 2706 } 2707 } 2708 2709 static void compute_m_n(u32 *ret_m, u32 *ret_n, 2710 u32 m, u32 n, u32 constant_n) 2711 { 2712 if (constant_n) 2713 *ret_n = constant_n; 2714 else 2715 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 2716 2717 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); 2718 intel_reduce_m_n_ratio(ret_m, ret_n); 2719 } 2720 2721 void 2722 intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes, 2723 int pixel_clock, int link_clock, 2724 int bw_overhead, 2725 struct intel_link_m_n *m_n) 2726 { 2727 u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock); 2728 u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16, 2729 bw_overhead); 2730 u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes); 2731 2732 /* 2733 * Windows/BIOS uses fixed M/N values always. Follow suit. 2734 * 2735 * Also several DP dongles in particular seem to be fussy 2736 * about too large link M/N values. Presumably the 20bit 2737 * value used by Windows/BIOS is acceptable to everyone. 2738 */ 2739 m_n->tu = 64; 2740 compute_m_n(&m_n->data_m, &m_n->data_n, 2741 data_m, data_n, 2742 0x8000000); 2743 2744 compute_m_n(&m_n->link_m, &m_n->link_n, 2745 pixel_clock, link_symbol_clock, 2746 0x80000); 2747 } 2748 2749 void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) 2750 { 2751 /* 2752 * There may be no VBT; and if the BIOS enabled SSC we can 2753 * just keep using it to avoid unnecessary flicker. Whereas if the 2754 * BIOS isn't using it, don't assume it will work even if the VBT 2755 * indicates as much. 
*/ 2757 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 2758 bool bios_lvds_use_ssc = intel_de_read(dev_priv, 2759 PCH_DREF_CONTROL) & 2760 DREF_SSC1_ENABLE; 2761 2762 if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) { 2763 drm_dbg_kms(&dev_priv->drm, 2764 "SSC %s by BIOS, overriding VBT which says %s\n", 2765 str_enabled_disabled(bios_lvds_use_ssc), 2766 str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc)); 2767 dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc; 2768 } 2769 } 2770 } 2771 2772 void intel_zero_m_n(struct intel_link_m_n *m_n) 2773 { 2774 /* corresponds to 0 register value */ 2775 memset(m_n, 0, sizeof(*m_n)); 2776 m_n->tu = 1; 2777 } 2778 2779 void intel_set_m_n(struct drm_i915_private *i915, 2780 const struct intel_link_m_n *m_n, 2781 i915_reg_t data_m_reg, i915_reg_t data_n_reg, 2782 i915_reg_t link_m_reg, i915_reg_t link_n_reg) 2783 { 2784 intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m); 2785 intel_de_write(i915, data_n_reg, m_n->data_n); 2786 intel_de_write(i915, link_m_reg, m_n->link_m); 2787 /* 2788 * On BDW+ writing LINK_N arms the double buffered update 2789 * of all the M/N registers, so it must be written last. 2790 */ 2791 intel_de_write(i915, link_n_reg, m_n->link_n); 2792 } 2793 2794 bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv, 2795 enum transcoder transcoder) 2796 { 2797 if (IS_HASWELL(dev_priv)) 2798 return transcoder == TRANSCODER_EDP; 2799 2800 return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv); 2801 } 2802 2803 void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc, 2804 enum transcoder transcoder, 2805 const struct intel_link_m_n *m_n) 2806 { 2807 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2808 enum pipe pipe = crtc->pipe; 2809 2810 if (DISPLAY_VER(dev_priv) >= 5) 2811 intel_set_m_n(dev_priv, m_n, 2812 PIPE_DATA_M1(dev_priv, transcoder), 2813 PIPE_DATA_N1(dev_priv, transcoder), 2814 PIPE_LINK_M1(dev_priv, transcoder), 2815 PIPE_LINK_N1(dev_priv, transcoder)); 2816 else 2817 intel_set_m_n(dev_priv, m_n, 2818 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), 2819 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); 2820 } 2821 2822 void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc, 2823 enum transcoder transcoder, 2824 const struct intel_link_m_n *m_n) 2825 { 2826 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2827 2828 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) 2829 return; 2830 2831 intel_set_m_n(dev_priv, m_n, 2832 PIPE_DATA_M2(dev_priv, transcoder), 2833 PIPE_DATA_N2(dev_priv, transcoder), 2834 PIPE_LINK_M2(dev_priv, transcoder), 2835 PIPE_LINK_N2(dev_priv, transcoder)); 2836 } 2837 2838 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state) 2839 { 2840 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2841 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2842 enum pipe pipe = crtc->pipe; 2843 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2844 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2845 u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end; 2846 int vsyncshift = 0; 2847 2848 drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)); 2849 2850 /* We need to be careful not to change the adjusted mode, for otherwise 2851 * the hw state checker will get angry at the mismatch.
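 * Hence the local copies of the vtotal/vblank values below.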
*/ 2852 crtc_vdisplay = adjusted_mode->crtc_vdisplay; 2853 crtc_vtotal = adjusted_mode->crtc_vtotal; 2854 crtc_vblank_start = adjusted_mode->crtc_vblank_start; 2855 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 2856 2857 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 2858 /* the chip adds 2 halflines automatically */ 2859 crtc_vtotal -= 1; 2860 crtc_vblank_end -= 1; 2861 2862 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 2863 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 2864 else 2865 vsyncshift = adjusted_mode->crtc_hsync_start - 2866 adjusted_mode->crtc_htotal / 2; 2867 if (vsyncshift < 0) 2868 vsyncshift += adjusted_mode->crtc_htotal; 2869 } 2870 2871 /* 2872 * VBLANK_START no longer works on ADL+, instead we must use 2873 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start. 2874 */ 2875 if (DISPLAY_VER(dev_priv) >= 13) { 2876 intel_de_write(dev_priv, 2877 TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder), 2878 crtc_vblank_start - crtc_vdisplay); 2879 2880 /* 2881 * VBLANK_START not used by hw, just clear it 2882 * to make it stand out in register dumps. 2883 */ 2884 crtc_vblank_start = 1; 2885 } 2886 2887 if (DISPLAY_VER(dev_priv) >= 4) 2888 intel_de_write(dev_priv, 2889 TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder), 2890 vsyncshift); 2891 2892 intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder), 2893 HACTIVE(adjusted_mode->crtc_hdisplay - 1) | 2894 HTOTAL(adjusted_mode->crtc_htotal - 1)); 2895 intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder), 2896 HBLANK_START(adjusted_mode->crtc_hblank_start - 1) | 2897 HBLANK_END(adjusted_mode->crtc_hblank_end - 1)); 2898 intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder), 2899 HSYNC_START(adjusted_mode->crtc_hsync_start - 1) | 2900 HSYNC_END(adjusted_mode->crtc_hsync_end - 1)); 2901 2902 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder), 2903 VACTIVE(crtc_vdisplay - 1) | 2904 VTOTAL(crtc_vtotal - 1)); 2905 intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder), 2906 VBLANK_START(crtc_vblank_start - 1) | 2907 VBLANK_END(crtc_vblank_end - 1)); 2908 intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder), 2909 VSYNC_START(adjusted_mode->crtc_vsync_start - 1) | 2910 VSYNC_END(adjusted_mode->crtc_vsync_end - 1)); 2911 2912 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 2913 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 2914 * documented on the DDI_FUNC_CTL register description, EDP Input Select 2915 * bits. 
*/ 2916 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 2917 (pipe == PIPE_B || pipe == PIPE_C)) 2918 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, pipe), 2919 VACTIVE(crtc_vdisplay - 1) | 2920 VTOTAL(crtc_vtotal - 1)); 2921 } 2922 2923 static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state) 2924 { 2925 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2926 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2927 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2928 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2929 u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end; 2930 2931 drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)); 2932 2933 crtc_vdisplay = adjusted_mode->crtc_vdisplay; 2934 crtc_vtotal = adjusted_mode->crtc_vtotal; 2935 crtc_vblank_start = adjusted_mode->crtc_vblank_start; 2936 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 2937 2938 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 2939 /* the chip adds 2 halflines automatically */ 2940 crtc_vtotal -= 1; 2941 crtc_vblank_end -= 1; 2942 } 2943 2944 if (DISPLAY_VER(dev_priv) >= 13) { 2945 intel_de_write(dev_priv, 2946 TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder), 2947 crtc_vblank_start - crtc_vdisplay); 2948 2949 /* 2950 * VBLANK_START not used by hw, just clear it 2951 * to make it stand out in register dumps. 2952 */ 2953 crtc_vblank_start = 1; 2954 } 2955 2956 /* 2957 * The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode. 2958 * But let's write it anyway to keep the state checker happy. 2959 */ 2960 intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder), 2961 VBLANK_START(crtc_vblank_start - 1) | 2962 VBLANK_END(crtc_vblank_end - 1)); 2963 /* 2964 * The double buffer latch point for TRANS_VTOTAL 2965 * is the transcoder's undelayed vblank. 2966 */ 2967 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder), 2968 VACTIVE(crtc_vdisplay - 1) | 2969 VTOTAL(crtc_vtotal - 1)); 2970 } 2971 2972 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 2973 { 2974 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2975 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2976 int width = drm_rect_width(&crtc_state->pipe_src); 2977 int height = drm_rect_height(&crtc_state->pipe_src); 2978 enum pipe pipe = crtc->pipe; 2979 2980 /* pipesrc controls the size that is scaled from, which should 2981 * always be the user's requested size. 
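 * Any pfit/scaler then expands from this size to the transcoder's
 * active area.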
2982 */ 2983 intel_de_write(dev_priv, PIPESRC(dev_priv, pipe), 2984 PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1)); 2985 } 2986 2987 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) 2988 { 2989 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 2990 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2991 2992 if (DISPLAY_VER(dev_priv) == 2) 2993 return false; 2994 2995 if (DISPLAY_VER(dev_priv) >= 9 || 2996 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2997 return intel_de_read(dev_priv, 2998 TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW; 2999 else 3000 return intel_de_read(dev_priv, 3001 TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK; 3002 } 3003 3004 static void intel_get_transcoder_timings(struct intel_crtc *crtc, 3005 struct intel_crtc_state *pipe_config) 3006 { 3007 struct drm_device *dev = crtc->base.dev; 3008 struct drm_i915_private *dev_priv = to_i915(dev); 3009 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 3010 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 3011 u32 tmp; 3012 3013 tmp = intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder)); 3014 adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1; 3015 adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1; 3016 3017 if (!transcoder_is_dsi(cpu_transcoder)) { 3018 tmp = intel_de_read(dev_priv, 3019 TRANS_HBLANK(dev_priv, cpu_transcoder)); 3020 adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1; 3021 adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1; 3022 } 3023 3024 tmp = intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder)); 3025 adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1; 3026 adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1; 3027 3028 tmp = intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder)); 3029 adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1; 3030 adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1; 3031 3032 /* FIXME TGL+ DSI transcoders have this! 
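 * (i.e. TRANS_VBLANK exists on those as well, but the readout is
 * currently skipped for all DSI transcoders.)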
*/ 3033 if (!transcoder_is_dsi(cpu_transcoder)) { 3034 tmp = intel_de_read(dev_priv, 3035 TRANS_VBLANK(dev_priv, cpu_transcoder)); 3036 adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1; 3037 adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1; 3038 } 3039 tmp = intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder)); 3040 adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1; 3041 adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1; 3042 3043 if (intel_pipe_is_interlaced(pipe_config)) { 3044 adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE; 3045 adjusted_mode->crtc_vtotal += 1; 3046 adjusted_mode->crtc_vblank_end += 1; 3047 } 3048 3049 if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder)) 3050 adjusted_mode->crtc_vblank_start = 3051 adjusted_mode->crtc_vdisplay + 3052 intel_de_read(dev_priv, 3053 TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder)); 3054 } 3055 3056 static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state) 3057 { 3058 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3059 int num_pipes = intel_crtc_num_joined_pipes(crtc_state); 3060 enum pipe primary_pipe, pipe = crtc->pipe; 3061 int width; 3062 3063 if (num_pipes == 1) 3064 return; 3065 3066 primary_pipe = joiner_primary_pipe(crtc_state); 3067 width = drm_rect_width(&crtc_state->pipe_src); 3068 3069 drm_rect_translate_to(&crtc_state->pipe_src, 3070 (pipe - primary_pipe) * width, 0); 3071 } 3072 3073 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 3074 struct intel_crtc_state *pipe_config) 3075 { 3076 struct drm_device *dev = crtc->base.dev; 3077 struct drm_i915_private *dev_priv = to_i915(dev); 3078 u32 tmp; 3079 3080 tmp = intel_de_read(dev_priv, PIPESRC(dev_priv, crtc->pipe)); 3081 3082 drm_rect_init(&pipe_config->pipe_src, 0, 0, 3083 REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1, 3084 REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1); 3085 3086 intel_joiner_adjust_pipe_src(pipe_config); 3087 } 3088 3089 void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 3090 { 3091 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3092 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3093 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3094 u32 val = 0; 3095 3096 /* 3097 * - We keep both pipes enabled on 830 3098 * - During modeset the pipe is still disabled and must remain so 3099 * - During fastset the pipe is already enabled and must remain so 3100 */ 3101 if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state)) 3102 val |= TRANSCONF_ENABLE; 3103 3104 if (crtc_state->double_wide) 3105 val |= TRANSCONF_DOUBLE_WIDE; 3106 3107 /* only g4x and later have fancy bpc/dither controls */ 3108 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 3109 IS_CHERRYVIEW(dev_priv)) { 3110 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 3111 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 3112 val |= TRANSCONF_DITHER_EN | 3113 TRANSCONF_DITHER_TYPE_SP; 3114 3115 switch (crtc_state->pipe_bpp) { 3116 default: 3117 /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ 3118 MISSING_CASE(crtc_state->pipe_bpp); 3119 fallthrough; 3120 case 18: 3121 val |= TRANSCONF_BPC_6; 3122 break; 3123 case 24: 3124 val |= TRANSCONF_BPC_8; 3125 break; 3126 case 30: 3127 val |= TRANSCONF_BPC_10; 3128 break; 3129 } 3130 } 3131 3132 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 3133 if (DISPLAY_VER(dev_priv) < 4 || 3134 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3135 val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION; 3136 else 3137 val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT; 3138 } else { 3139 val |= TRANSCONF_INTERLACE_PROGRESSIVE; 3140 } 3141 3142 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 3143 crtc_state->limited_color_range) 3144 val |= TRANSCONF_COLOR_RANGE_SELECT; 3145 3146 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 3147 3148 if (crtc_state->wgc_enable) 3149 val |= TRANSCONF_WGC_ENABLE; 3150 3151 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3152 3153 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val); 3154 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder)); 3155 } 3156 3157 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 3158 { 3159 if (IS_I830(dev_priv)) 3160 return false; 3161 3162 return DISPLAY_VER(dev_priv) >= 4 || 3163 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 3164 } 3165 3166 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) 3167 { 3168 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3169 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3170 enum pipe pipe; 3171 u32 tmp; 3172 3173 if (!i9xx_has_pfit(dev_priv)) 3174 return; 3175 3176 tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)); 3177 if (!(tmp & PFIT_ENABLE)) 3178 return; 3179 3180 /* Check whether the pfit is attached to our pipe. */ 3181 if (DISPLAY_VER(dev_priv) >= 4) 3182 pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp); 3183 else 3184 pipe = PIPE_B; 3185 3186 if (pipe != crtc->pipe) 3187 return; 3188 3189 crtc_state->gmch_pfit.control = tmp; 3190 crtc_state->gmch_pfit.pgm_ratios = 3191 intel_de_read(dev_priv, PFIT_PGM_RATIOS(dev_priv)); 3192 } 3193 3194 static enum intel_output_format 3195 bdw_get_pipe_misc_output_format(struct intel_crtc *crtc) 3196 { 3197 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3198 u32 tmp; 3199 3200 tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); 3201 3202 if (tmp & PIPE_MISC_YUV420_ENABLE) { 3203 /* 3204 * We support 4:2:0 in full blend mode only. 3205 * For xe3_lpd+ this is implied in YUV420 Enable bit. 3206 * Ensure the same for prior platforms in YUV420 Mode bit. 
3207 */ 3208 if (DISPLAY_VER(dev_priv) < 30) 3209 drm_WARN_ON(&dev_priv->drm, 3210 (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0); 3211 3212 return INTEL_OUTPUT_FORMAT_YCBCR420; 3213 } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) { 3214 return INTEL_OUTPUT_FORMAT_YCBCR444; 3215 } else { 3216 return INTEL_OUTPUT_FORMAT_RGB; 3217 } 3218 } 3219 3220 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 3221 struct intel_crtc_state *pipe_config) 3222 { 3223 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3224 enum intel_display_power_domain power_domain; 3225 intel_wakeref_t wakeref; 3226 u32 tmp; 3227 bool ret; 3228 3229 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 3230 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 3231 if (!wakeref) 3232 return false; 3233 3234 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3235 pipe_config->sink_format = pipe_config->output_format; 3236 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 3237 pipe_config->shared_dpll = NULL; 3238 3239 ret = false; 3240 3241 tmp = intel_de_read(dev_priv, 3242 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 3243 if (!(tmp & TRANSCONF_ENABLE)) 3244 goto out; 3245 3246 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 3247 IS_CHERRYVIEW(dev_priv)) { 3248 switch (tmp & TRANSCONF_BPC_MASK) { 3249 case TRANSCONF_BPC_6: 3250 pipe_config->pipe_bpp = 18; 3251 break; 3252 case TRANSCONF_BPC_8: 3253 pipe_config->pipe_bpp = 24; 3254 break; 3255 case TRANSCONF_BPC_10: 3256 pipe_config->pipe_bpp = 30; 3257 break; 3258 default: 3259 MISSING_CASE(tmp); 3260 break; 3261 } 3262 } 3263 3264 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 3265 (tmp & TRANSCONF_COLOR_RANGE_SELECT)) 3266 pipe_config->limited_color_range = true; 3267 3268 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp); 3269 3270 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 3271 3272 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 3273 (tmp & TRANSCONF_WGC_ENABLE)) 3274 pipe_config->wgc_enable = true; 3275 3276 intel_color_get_config(pipe_config); 3277 3278 if (HAS_DOUBLE_WIDE(dev_priv)) 3279 pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE; 3280 3281 intel_get_transcoder_timings(crtc, pipe_config); 3282 intel_get_pipe_src_size(crtc, pipe_config); 3283 3284 i9xx_get_pfit_config(pipe_config); 3285 3286 i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state); 3287 3288 if (DISPLAY_VER(dev_priv) >= 4) { 3289 tmp = pipe_config->dpll_hw_state.i9xx.dpll_md; 3290 pipe_config->pixel_multiplier = 3291 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 3292 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 3293 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 3294 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 3295 tmp = pipe_config->dpll_hw_state.i9xx.dpll; 3296 pipe_config->pixel_multiplier = 3297 ((tmp & SDVO_MULTIPLIER_MASK) 3298 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 3299 } else { 3300 /* Note that on i915G/GM the pixel multiplier is in the sdvo 3301 * port and will be fixed up in the encoder->get_config 3302 * function. */ 3303 pipe_config->pixel_multiplier = 1; 3304 } 3305 3306 if (IS_CHERRYVIEW(dev_priv)) 3307 chv_crtc_clock_get(pipe_config); 3308 else if (IS_VALLEYVIEW(dev_priv)) 3309 vlv_crtc_clock_get(pipe_config); 3310 else 3311 i9xx_crtc_clock_get(pipe_config); 3312 3313 /* 3314 * Normally the dotclock is filled in by the encoder .get_config() 3315 * but in case the pipe is enabled w/o any ports we need a sane 3316 * default. 
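 * port_clock was filled in by the clock readout just above.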
3317 */ 3318 pipe_config->hw.adjusted_mode.crtc_clock = 3319 pipe_config->port_clock / pipe_config->pixel_multiplier; 3320 3321 ret = true; 3322 3323 out: 3324 intel_display_power_put(dev_priv, power_domain, wakeref); 3325 3326 return ret; 3327 } 3328 3329 void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) 3330 { 3331 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3332 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3333 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3334 u32 val = 0; 3335 3336 /* 3337 * - During modeset the pipe is still disabled and must remain so 3338 * - During fastset the pipe is already enabled and must remain so 3339 */ 3340 if (!intel_crtc_needs_modeset(crtc_state)) 3341 val |= TRANSCONF_ENABLE; 3342 3343 switch (crtc_state->pipe_bpp) { 3344 default: 3345 /* Case prevented by intel_choose_pipe_bpp_dither. */ 3346 MISSING_CASE(crtc_state->pipe_bpp); 3347 fallthrough; 3348 case 18: 3349 val |= TRANSCONF_BPC_6; 3350 break; 3351 case 24: 3352 val |= TRANSCONF_BPC_8; 3353 break; 3354 case 30: 3355 val |= TRANSCONF_BPC_10; 3356 break; 3357 case 36: 3358 val |= TRANSCONF_BPC_12; 3359 break; 3360 } 3361 3362 if (crtc_state->dither) 3363 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3364 3365 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3366 val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3367 else 3368 val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3369 3370 /* 3371 * This would end up with an odd purple hue over 3372 * the entire display. Make sure we don't do it. 3373 */ 3374 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && 3375 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 3376 3377 if (crtc_state->limited_color_range && 3378 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3379 val |= TRANSCONF_COLOR_RANGE_SELECT; 3380 3381 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3382 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709; 3383 3384 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 3385 3386 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3387 val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); 3388 3389 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val); 3390 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder)); 3391 } 3392 3393 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) 3394 { 3395 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3396 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3397 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3398 u32 val = 0; 3399 3400 /* 3401 * - During modeset the pipe is still disabled and must remain so 3402 * - During fastset the pipe is already enabled and must remain so 3403 */ 3404 if (!intel_crtc_needs_modeset(crtc_state)) 3405 val |= TRANSCONF_ENABLE; 3406 3407 if (IS_HASWELL(dev_priv) && crtc_state->dither) 3408 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3409 3410 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3411 val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3412 else 3413 val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3414 3415 if (IS_HASWELL(dev_priv) && 3416 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3417 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW; 3418 3419 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val); 3420 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder)); 3421 } 3422 3423 static void bdw_set_pipe_misc(struct intel_dsb 
*dsb, 3424 const struct intel_crtc_state *crtc_state) 3425 { 3426 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3427 struct intel_display *display = to_intel_display(crtc->base.dev); 3428 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3429 u32 val = 0; 3430 3431 switch (crtc_state->pipe_bpp) { 3432 case 18: 3433 val |= PIPE_MISC_BPC_6; 3434 break; 3435 case 24: 3436 val |= PIPE_MISC_BPC_8; 3437 break; 3438 case 30: 3439 val |= PIPE_MISC_BPC_10; 3440 break; 3441 case 36: 3442 /* Port output 12BPC defined for ADLP+ */ 3443 if (DISPLAY_VER(dev_priv) >= 13) 3444 val |= PIPE_MISC_BPC_12_ADLP; 3445 break; 3446 default: 3447 MISSING_CASE(crtc_state->pipe_bpp); 3448 break; 3449 } 3450 3451 if (crtc_state->dither) 3452 val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP; 3453 3454 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 3455 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 3456 val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV; 3457 3458 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 3459 val |= DISPLAY_VER(display) >= 30 ? PIPE_MISC_YUV420_ENABLE : 3460 PIPE_MISC_YUV420_ENABLE | PIPE_MISC_YUV420_MODE_FULL_BLEND; 3461 3462 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state)) 3463 val |= PIPE_MISC_HDR_MODE_PRECISION; 3464 3465 if (DISPLAY_VER(dev_priv) >= 12) 3466 val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC; 3467 3468 /* allow PSR with sprite enabled */ 3469 if (IS_BROADWELL(dev_priv)) 3470 val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE; 3471 3472 intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val); 3473 } 3474 3475 int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) 3476 { 3477 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3478 u32 tmp; 3479 3480 tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); 3481 3482 switch (tmp & PIPE_MISC_BPC_MASK) { 3483 case PIPE_MISC_BPC_6: 3484 return 18; 3485 case PIPE_MISC_BPC_8: 3486 return 24; 3487 case PIPE_MISC_BPC_10: 3488 return 30; 3489 /* 3490 * PORT OUTPUT 12 BPC defined for ADLP+. 3491 * 3492 * TODO: 3493 * For previous platforms with DSI interface, bits 5:7 3494 * are used for storing pipe_bpp irrespective of dithering. 3495 * Since the value of 12 BPC is not defined for these bits 3496 * on older platforms, need to find a workaround for 12 BPC 3497 * MIPI DSI HW readout. 3498 */ 3499 case PIPE_MISC_BPC_12_ADLP: 3500 if (DISPLAY_VER(dev_priv) >= 13) 3501 return 36; 3502 fallthrough; 3503 default: 3504 MISSING_CASE(tmp); 3505 return 0; 3506 } 3507 } 3508 3509 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) 3510 { 3511 /* 3512 * Account for spread spectrum to avoid 3513 * oversubscribing the link. Max center spread 3514 * is 2.5%; use 5% for safety's sake. 
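 * For example (numbers purely illustrative): a 148500 kHz mode at
 * 24 bpp gives bps = 148500 * 24 * 21 / 20 = 3742200, and a
 * 270000 kHz link carrying 8 bits per clock per lane then needs
 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.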
3515 */ 3516 u32 bps = target_clock * bpp * 21 / 20; 3517 return DIV_ROUND_UP(bps, link_bw * 8); 3518 } 3519 3520 void intel_get_m_n(struct drm_i915_private *i915, 3521 struct intel_link_m_n *m_n, 3522 i915_reg_t data_m_reg, i915_reg_t data_n_reg, 3523 i915_reg_t link_m_reg, i915_reg_t link_n_reg) 3524 { 3525 m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK; 3526 m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK; 3527 m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK; 3528 m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK; 3529 m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1; 3530 } 3531 3532 void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, 3533 enum transcoder transcoder, 3534 struct intel_link_m_n *m_n) 3535 { 3536 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3537 enum pipe pipe = crtc->pipe; 3538 3539 if (DISPLAY_VER(dev_priv) >= 5) 3540 intel_get_m_n(dev_priv, m_n, 3541 PIPE_DATA_M1(dev_priv, transcoder), 3542 PIPE_DATA_N1(dev_priv, transcoder), 3543 PIPE_LINK_M1(dev_priv, transcoder), 3544 PIPE_LINK_N1(dev_priv, transcoder)); 3545 else 3546 intel_get_m_n(dev_priv, m_n, 3547 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), 3548 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); 3549 } 3550 3551 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, 3552 enum transcoder transcoder, 3553 struct intel_link_m_n *m_n) 3554 { 3555 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3556 3557 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) 3558 return; 3559 3560 intel_get_m_n(dev_priv, m_n, 3561 PIPE_DATA_M2(dev_priv, transcoder), 3562 PIPE_DATA_N2(dev_priv, transcoder), 3563 PIPE_LINK_M2(dev_priv, transcoder), 3564 PIPE_LINK_N2(dev_priv, transcoder)); 3565 } 3566 3567 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state) 3568 { 3569 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3570 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3571 u32 ctl, pos, size; 3572 enum pipe pipe; 3573 3574 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe)); 3575 if ((ctl & PF_ENABLE) == 0) 3576 return; 3577 3578 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 3579 pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl); 3580 else 3581 pipe = crtc->pipe; 3582 3583 crtc_state->pch_pfit.enabled = true; 3584 3585 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe)); 3586 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe)); 3587 3588 drm_rect_init(&crtc_state->pch_pfit.dst, 3589 REG_FIELD_GET(PF_WIN_XPOS_MASK, pos), 3590 REG_FIELD_GET(PF_WIN_YPOS_MASK, pos), 3591 REG_FIELD_GET(PF_WIN_XSIZE_MASK, size), 3592 REG_FIELD_GET(PF_WIN_YSIZE_MASK, size)); 3593 3594 /* 3595 * We currently do not free assignments of panel fitters on 3596 * ivb/hsw (since we don't use the higher upscaling modes which 3597 * differentiate them) so just WARN about this case for now. 
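 * If the WARN ever fires, this pipe is scanning out through a panel
 * fitter whose pipe select field still points at some other pipe.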
3598 */ 3599 drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe); 3600 } 3601 3602 static bool ilk_get_pipe_config(struct intel_crtc *crtc, 3603 struct intel_crtc_state *pipe_config) 3604 { 3605 struct drm_device *dev = crtc->base.dev; 3606 struct drm_i915_private *dev_priv = to_i915(dev); 3607 enum intel_display_power_domain power_domain; 3608 intel_wakeref_t wakeref; 3609 u32 tmp; 3610 bool ret; 3611 3612 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 3613 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 3614 if (!wakeref) 3615 return false; 3616 3617 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 3618 pipe_config->shared_dpll = NULL; 3619 3620 ret = false; 3621 tmp = intel_de_read(dev_priv, 3622 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 3623 if (!(tmp & TRANSCONF_ENABLE)) 3624 goto out; 3625 3626 switch (tmp & TRANSCONF_BPC_MASK) { 3627 case TRANSCONF_BPC_6: 3628 pipe_config->pipe_bpp = 18; 3629 break; 3630 case TRANSCONF_BPC_8: 3631 pipe_config->pipe_bpp = 24; 3632 break; 3633 case TRANSCONF_BPC_10: 3634 pipe_config->pipe_bpp = 30; 3635 break; 3636 case TRANSCONF_BPC_12: 3637 pipe_config->pipe_bpp = 36; 3638 break; 3639 default: 3640 break; 3641 } 3642 3643 if (tmp & TRANSCONF_COLOR_RANGE_SELECT) 3644 pipe_config->limited_color_range = true; 3645 3646 switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) { 3647 case TRANSCONF_OUTPUT_COLORSPACE_YUV601: 3648 case TRANSCONF_OUTPUT_COLORSPACE_YUV709: 3649 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 3650 break; 3651 default: 3652 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3653 break; 3654 } 3655 3656 pipe_config->sink_format = pipe_config->output_format; 3657 3658 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp); 3659 3660 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 3661 3662 pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp); 3663 3664 intel_color_get_config(pipe_config); 3665 3666 pipe_config->pixel_multiplier = 1; 3667 3668 ilk_pch_get_config(pipe_config); 3669 3670 intel_get_transcoder_timings(crtc, pipe_config); 3671 intel_get_pipe_src_size(crtc, pipe_config); 3672 3673 ilk_get_pfit_config(pipe_config); 3674 3675 ret = true; 3676 3677 out: 3678 intel_display_power_put(dev_priv, power_domain, wakeref); 3679 3680 return ret; 3681 } 3682 3683 static u8 joiner_pipes(struct drm_i915_private *i915) 3684 { 3685 u8 pipes; 3686 3687 if (DISPLAY_VER(i915) >= 12) 3688 pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); 3689 else if (DISPLAY_VER(i915) >= 11) 3690 pipes = BIT(PIPE_B) | BIT(PIPE_C); 3691 else 3692 pipes = 0; 3693 3694 return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask; 3695 } 3696 3697 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, 3698 enum transcoder cpu_transcoder) 3699 { 3700 enum intel_display_power_domain power_domain; 3701 intel_wakeref_t wakeref; 3702 u32 tmp = 0; 3703 3704 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3705 3706 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) 3707 tmp = intel_de_read(dev_priv, 3708 TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)); 3709 3710 return tmp & TRANS_DDI_FUNC_ENABLE; 3711 } 3712 3713 static void enabled_uncompressed_joiner_pipes(struct intel_display *display, 3714 u8 *primary_pipes, u8 *secondary_pipes) 3715 { 3716 struct drm_i915_private *i915 = to_i915(display->drm); 3717 struct intel_crtc *crtc; 3718 3719 *primary_pipes = 0; 3720 *secondary_pipes = 
0; 3721 3722 if (!HAS_UNCOMPRESSED_JOINER(display)) 3723 return; 3724 3725 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, 3726 joiner_pipes(i915)) { 3727 enum intel_display_power_domain power_domain; 3728 enum pipe pipe = crtc->pipe; 3729 intel_wakeref_t wakeref; 3730 3731 power_domain = POWER_DOMAIN_PIPE(pipe); 3732 with_intel_display_power_if_enabled(i915, power_domain, wakeref) { 3733 u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe)); 3734 3735 if (tmp & UNCOMPRESSED_JOINER_PRIMARY) 3736 *primary_pipes |= BIT(pipe); 3737 if (tmp & UNCOMPRESSED_JOINER_SECONDARY) 3738 *secondary_pipes |= BIT(pipe); 3739 } 3740 } 3741 } 3742 3743 static void enabled_bigjoiner_pipes(struct intel_display *display, 3744 u8 *primary_pipes, u8 *secondary_pipes) 3745 { 3746 struct drm_i915_private *i915 = to_i915(display->drm); 3747 struct intel_crtc *crtc; 3748 3749 *primary_pipes = 0; 3750 *secondary_pipes = 0; 3751 3752 if (!HAS_BIGJOINER(display)) 3753 return; 3754 3755 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, 3756 joiner_pipes(i915)) { 3757 enum intel_display_power_domain power_domain; 3758 enum pipe pipe = crtc->pipe; 3759 intel_wakeref_t wakeref; 3760 3761 power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe); 3762 with_intel_display_power_if_enabled(i915, power_domain, wakeref) { 3763 u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe)); 3764 3765 if (!(tmp & BIG_JOINER_ENABLE)) 3766 continue; 3767 3768 if (tmp & PRIMARY_BIG_JOINER_ENABLE) 3769 *primary_pipes |= BIT(pipe); 3770 else 3771 *secondary_pipes |= BIT(pipe); 3772 } 3773 } 3774 } 3775 3776 static u8 expected_secondary_pipes(u8 primary_pipes, int num_pipes) 3777 { 3778 u8 secondary_pipes = 0; 3779 3780 for (int i = 1; i < num_pipes; i++) 3781 secondary_pipes |= primary_pipes << i; 3782 3783 return secondary_pipes; 3784 } 3785 3786 static u8 expected_uncompressed_joiner_secondary_pipes(u8 uncompjoiner_primary_pipes) 3787 { 3788 return expected_secondary_pipes(uncompjoiner_primary_pipes, 2); 3789 } 3790 3791 static u8 expected_bigjoiner_secondary_pipes(u8 bigjoiner_primary_pipes) 3792 { 3793 return expected_secondary_pipes(bigjoiner_primary_pipes, 2); 3794 } 3795 3796 static u8 get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes) 3797 { 3798 primary_pipes &= GENMASK(pipe, 0); 3799 3800 return primary_pipes ? 
BIT(fls(primary_pipes) - 1) : 0; 3801 } 3802 3803 static u8 expected_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes) 3804 { 3805 return expected_secondary_pipes(ultrajoiner_primary_pipes, 4); 3806 } 3807 3808 static u8 fixup_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes, 3809 u8 ultrajoiner_secondary_pipes) 3810 { 3811 return ultrajoiner_secondary_pipes | ultrajoiner_primary_pipes << 3; 3812 } 3813 3814 static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915, 3815 u8 *primary_pipes, u8 *secondary_pipes) 3816 { 3817 struct intel_display *display = &i915->display; 3818 struct intel_crtc *crtc; 3819 3820 *primary_pipes = 0; 3821 *secondary_pipes = 0; 3822 3823 if (!HAS_ULTRAJOINER(display)) 3824 return; 3825 3826 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, 3827 joiner_pipes(i915)) { 3828 enum intel_display_power_domain power_domain; 3829 enum pipe pipe = crtc->pipe; 3830 intel_wakeref_t wakeref; 3831 3832 power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe); 3833 with_intel_display_power_if_enabled(i915, power_domain, wakeref) { 3834 u32 tmp = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe)); 3835 3836 if (!(tmp & ULTRA_JOINER_ENABLE)) 3837 continue; 3838 3839 if (tmp & PRIMARY_ULTRA_JOINER_ENABLE) 3840 *primary_pipes |= BIT(pipe); 3841 else 3842 *secondary_pipes |= BIT(pipe); 3843 } 3844 } 3845 } 3846 3847 static void enabled_joiner_pipes(struct drm_i915_private *dev_priv, 3848 enum pipe pipe, 3849 u8 *primary_pipe, u8 *secondary_pipes) 3850 { 3851 struct intel_display *display = to_intel_display(&dev_priv->drm); 3852 u8 primary_ultrajoiner_pipes; 3853 u8 primary_uncompressed_joiner_pipes, primary_bigjoiner_pipes; 3854 u8 secondary_ultrajoiner_pipes; 3855 u8 secondary_uncompressed_joiner_pipes, secondary_bigjoiner_pipes; 3856 u8 ultrajoiner_pipes; 3857 u8 uncompressed_joiner_pipes, bigjoiner_pipes; 3858 3859 enabled_ultrajoiner_pipes(dev_priv, &primary_ultrajoiner_pipes, 3860 &secondary_ultrajoiner_pipes); 3861 /* 3862 * For some strange reason the last pipe in the set of four 3863 * shouldn't have the ultrajoiner enable bit set in hardware. 3864 * Set the bit anyway to make life easier. 
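 * For example (illustrative): with pipe A as the ultrajoiner primary,
 * primary_pipes == 0x1 and the hardware reports only
 * secondary_pipes == 0x6 (pipes B and C);
 * fixup_ultrajoiner_secondary_pipes() ORs in 0x1 << 3 == 0x8 so that
 * secondary_pipes becomes 0xe, matching
 * expected_ultrajoiner_secondary_pipes(0x1).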
3865 */ 3866 drm_WARN_ON(&dev_priv->drm, 3867 expected_secondary_pipes(primary_ultrajoiner_pipes, 3) != 3868 secondary_ultrajoiner_pipes); 3869 secondary_ultrajoiner_pipes = 3870 fixup_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes, 3871 secondary_ultrajoiner_pipes); 3872 3873 drm_WARN_ON(&dev_priv->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0); 3874 3875 enabled_uncompressed_joiner_pipes(display, &primary_uncompressed_joiner_pipes, 3876 &secondary_uncompressed_joiner_pipes); 3877 3878 drm_WARN_ON(display->drm, 3879 (primary_uncompressed_joiner_pipes & secondary_uncompressed_joiner_pipes) != 0); 3880 3881 enabled_bigjoiner_pipes(display, &primary_bigjoiner_pipes, 3882 &secondary_bigjoiner_pipes); 3883 3884 drm_WARN_ON(display->drm, 3885 (primary_bigjoiner_pipes & secondary_bigjoiner_pipes) != 0); 3886 3887 ultrajoiner_pipes = primary_ultrajoiner_pipes | secondary_ultrajoiner_pipes; 3888 uncompressed_joiner_pipes = primary_uncompressed_joiner_pipes | 3889 secondary_uncompressed_joiner_pipes; 3890 bigjoiner_pipes = primary_bigjoiner_pipes | secondary_bigjoiner_pipes; 3891 3892 drm_WARN(display->drm, (ultrajoiner_pipes & bigjoiner_pipes) != ultrajoiner_pipes, 3893 "Ultrajoiner pipes(%#x) should be bigjoiner pipes(%#x)\n", 3894 ultrajoiner_pipes, bigjoiner_pipes); 3895 3896 drm_WARN(display->drm, secondary_ultrajoiner_pipes != 3897 expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes), 3898 "Wrong secondary ultrajoiner pipes(expected %#x, current %#x)\n", 3899 expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes), 3900 secondary_ultrajoiner_pipes); 3901 3902 drm_WARN(display->drm, (uncompressed_joiner_pipes & bigjoiner_pipes) != 0, 3903 "Uncompressed joiner pipes(%#x) and bigjoiner pipes(%#x) can't intersect\n", 3904 uncompressed_joiner_pipes, bigjoiner_pipes); 3905 3906 drm_WARN(display->drm, secondary_bigjoiner_pipes != 3907 expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes), 3908 "Wrong secondary bigjoiner pipes(expected %#x, current %#x)\n", 3909 expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes), 3910 secondary_bigjoiner_pipes); 3911 3912 drm_WARN(display->drm, secondary_uncompressed_joiner_pipes != 3913 expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes), 3914 "Wrong secondary uncompressed joiner pipes(expected %#x, current %#x)\n", 3915 expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes), 3916 secondary_uncompressed_joiner_pipes); 3917 3918 *primary_pipe = 0; 3919 *secondary_pipes = 0; 3920 3921 if (ultrajoiner_pipes & BIT(pipe)) { 3922 *primary_pipe = get_joiner_primary_pipe(pipe, primary_ultrajoiner_pipes); 3923 *secondary_pipes = secondary_ultrajoiner_pipes & 3924 expected_ultrajoiner_secondary_pipes(*primary_pipe); 3925 3926 drm_WARN(display->drm, 3927 expected_ultrajoiner_secondary_pipes(*primary_pipe) != 3928 *secondary_pipes, 3929 "Wrong ultrajoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", 3930 *primary_pipe, 3931 expected_ultrajoiner_secondary_pipes(*primary_pipe), 3932 *secondary_pipes); 3933 return; 3934 } 3935 3936 if (uncompressed_joiner_pipes & BIT(pipe)) { 3937 *primary_pipe = get_joiner_primary_pipe(pipe, primary_uncompressed_joiner_pipes); 3938 *secondary_pipes = secondary_uncompressed_joiner_pipes & 3939 expected_uncompressed_joiner_secondary_pipes(*primary_pipe); 3940 3941 drm_WARN(display->drm, 3942 expected_uncompressed_joiner_secondary_pipes(*primary_pipe) != 3943 *secondary_pipes, 3944 "Wrong uncompressed joiner 
secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", 3945 *primary_pipe, 3946 expected_uncompressed_joiner_secondary_pipes(*primary_pipe), 3947 *secondary_pipes); 3948 return; 3949 } 3950 3951 if (bigjoiner_pipes & BIT(pipe)) { 3952 *primary_pipe = get_joiner_primary_pipe(pipe, primary_bigjoiner_pipes); 3953 *secondary_pipes = secondary_bigjoiner_pipes & 3954 expected_bigjoiner_secondary_pipes(*primary_pipe); 3955 3956 drm_WARN(display->drm, 3957 expected_bigjoiner_secondary_pipes(*primary_pipe) != 3958 *secondary_pipes, 3959 "Wrong bigjoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", 3960 *primary_pipe, 3961 expected_bigjoiner_secondary_pipes(*primary_pipe), 3962 *secondary_pipes); 3963 return; 3964 } 3965 } 3966 3967 static u8 hsw_panel_transcoders(struct drm_i915_private *i915) 3968 { 3969 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP); 3970 3971 if (DISPLAY_VER(i915) >= 11) 3972 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 3973 3974 return panel_transcoder_mask; 3975 } 3976 3977 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) 3978 { 3979 struct drm_device *dev = crtc->base.dev; 3980 struct drm_i915_private *dev_priv = to_i915(dev); 3981 u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv); 3982 enum transcoder cpu_transcoder; 3983 u8 primary_pipe, secondary_pipes; 3984 u8 enabled_transcoders = 0; 3985 3986 /* 3987 * XXX: Do intel_display_power_get_if_enabled before reading this (for 3988 * consistency and less surprising code; it's in always on power). 3989 */ 3990 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, 3991 panel_transcoder_mask) { 3992 enum intel_display_power_domain power_domain; 3993 intel_wakeref_t wakeref; 3994 enum pipe trans_pipe; 3995 u32 tmp = 0; 3996 3997 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3998 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) 3999 tmp = intel_de_read(dev_priv, 4000 TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)); 4001 4002 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 4003 continue; 4004 4005 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 4006 default: 4007 drm_WARN(dev, 1, 4008 "unknown pipe linked to transcoder %s\n", 4009 transcoder_name(cpu_transcoder)); 4010 fallthrough; 4011 case TRANS_DDI_EDP_INPUT_A_ONOFF: 4012 case TRANS_DDI_EDP_INPUT_A_ON: 4013 trans_pipe = PIPE_A; 4014 break; 4015 case TRANS_DDI_EDP_INPUT_B_ONOFF: 4016 trans_pipe = PIPE_B; 4017 break; 4018 case TRANS_DDI_EDP_INPUT_C_ONOFF: 4019 trans_pipe = PIPE_C; 4020 break; 4021 case TRANS_DDI_EDP_INPUT_D_ONOFF: 4022 trans_pipe = PIPE_D; 4023 break; 4024 } 4025 4026 if (trans_pipe == crtc->pipe) 4027 enabled_transcoders |= BIT(cpu_transcoder); 4028 } 4029 4030 /* single pipe or joiner primary */ 4031 cpu_transcoder = (enum transcoder) crtc->pipe; 4032 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 4033 enabled_transcoders |= BIT(cpu_transcoder); 4034 4035 /* joiner secondary -> consider the primary pipe's transcoder as well */ 4036 enabled_joiner_pipes(dev_priv, crtc->pipe, &primary_pipe, &secondary_pipes); 4037 if (secondary_pipes & BIT(crtc->pipe)) { 4038 cpu_transcoder = (enum transcoder)ffs(primary_pipe) - 1; 4039 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 4040 enabled_transcoders |= BIT(cpu_transcoder); 4041 } 4042 4043 return enabled_transcoders; 4044 } 4045 4046 static bool has_edp_transcoders(u8 enabled_transcoders) 4047 { 4048 return enabled_transcoders & BIT(TRANSCODER_EDP); 4049 } 4050 4051 static bool has_dsi_transcoders(u8 
enabled_transcoders) 4052 { 4053 return enabled_transcoders & (BIT(TRANSCODER_DSI_0) | 4054 BIT(TRANSCODER_DSI_1)); 4055 } 4056 4057 static bool has_pipe_transcoders(u8 enabled_transcoders) 4058 { 4059 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) | 4060 BIT(TRANSCODER_DSI_0) | 4061 BIT(TRANSCODER_DSI_1)); 4062 } 4063 4064 static void assert_enabled_transcoders(struct drm_i915_private *i915, 4065 u8 enabled_transcoders) 4066 { 4067 /* Only one type of transcoder please */ 4068 drm_WARN_ON(&i915->drm, 4069 has_edp_transcoders(enabled_transcoders) + 4070 has_dsi_transcoders(enabled_transcoders) + 4071 has_pipe_transcoders(enabled_transcoders) > 1); 4072 4073 /* Only DSI transcoders can be ganged */ 4074 drm_WARN_ON(&i915->drm, 4075 !has_dsi_transcoders(enabled_transcoders) && 4076 !is_power_of_2(enabled_transcoders)); 4077 } 4078 4079 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 4080 struct intel_crtc_state *pipe_config, 4081 struct intel_display_power_domain_set *power_domain_set) 4082 { 4083 struct drm_device *dev = crtc->base.dev; 4084 struct drm_i915_private *dev_priv = to_i915(dev); 4085 unsigned long enabled_transcoders; 4086 u32 tmp; 4087 4088 enabled_transcoders = hsw_enabled_transcoders(crtc); 4089 if (!enabled_transcoders) 4090 return false; 4091 4092 assert_enabled_transcoders(dev_priv, enabled_transcoders); 4093 4094 /* 4095 * With the exception of DSI we should only ever have 4096 * a single enabled transcoder. With DSI let's just 4097 * pick the first one. 4098 */ 4099 pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1; 4100 4101 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 4102 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 4103 return false; 4104 4105 if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) { 4106 tmp = intel_de_read(dev_priv, 4107 TRANS_DDI_FUNC_CTL(dev_priv, pipe_config->cpu_transcoder)); 4108 4109 if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF) 4110 pipe_config->pch_pfit.force_thru = true; 4111 } 4112 4113 tmp = intel_de_read(dev_priv, 4114 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 4115 4116 return tmp & TRANSCONF_ENABLE; 4117 } 4118 4119 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 4120 struct intel_crtc_state *pipe_config, 4121 struct intel_display_power_domain_set *power_domain_set) 4122 { 4123 struct intel_display *display = to_intel_display(crtc); 4124 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4125 enum transcoder cpu_transcoder; 4126 enum port port; 4127 u32 tmp; 4128 4129 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 4130 if (port == PORT_A) 4131 cpu_transcoder = TRANSCODER_DSI_A; 4132 else 4133 cpu_transcoder = TRANSCODER_DSI_C; 4134 4135 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 4136 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) 4137 continue; 4138 4139 /* 4140 * The PLL needs to be enabled with a valid divider 4141 * configuration, otherwise accessing DSI registers will hang 4142 * the machine. See BSpec North Display Engine 4143 * registers/MIPI[BXT]. We can break out here early, since we 4144 * need the same DSI PLL to be enabled for both DSI ports. 
4145 */ 4146 if (!bxt_dsi_pll_is_enabled(dev_priv)) 4147 break; 4148 4149 /* XXX: this works for video mode only */ 4150 tmp = intel_de_read(display, BXT_MIPI_PORT_CTRL(port)); 4151 if (!(tmp & DPI_ENABLE)) 4152 continue; 4153 4154 tmp = intel_de_read(display, MIPI_CTRL(display, port)); 4155 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 4156 continue; 4157 4158 pipe_config->cpu_transcoder = cpu_transcoder; 4159 break; 4160 } 4161 4162 return transcoder_is_dsi(pipe_config->cpu_transcoder); 4163 } 4164 4165 static void intel_joiner_get_config(struct intel_crtc_state *crtc_state) 4166 { 4167 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4168 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4169 u8 primary_pipe, secondary_pipes; 4170 enum pipe pipe = crtc->pipe; 4171 4172 enabled_joiner_pipes(i915, pipe, &primary_pipe, &secondary_pipes); 4173 4174 if (((primary_pipe | secondary_pipes) & BIT(pipe)) == 0) 4175 return; 4176 4177 crtc_state->joiner_pipes = primary_pipe | secondary_pipes; 4178 } 4179 4180 static bool hsw_get_pipe_config(struct intel_crtc *crtc, 4181 struct intel_crtc_state *pipe_config) 4182 { 4183 struct intel_display *display = to_intel_display(crtc); 4184 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4185 bool active; 4186 u32 tmp; 4187 4188 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains, 4189 POWER_DOMAIN_PIPE(crtc->pipe))) 4190 return false; 4191 4192 pipe_config->shared_dpll = NULL; 4193 4194 active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains); 4195 4196 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 4197 bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) { 4198 drm_WARN_ON(&dev_priv->drm, active); 4199 active = true; 4200 } 4201 4202 if (!active) 4203 goto out; 4204 4205 intel_joiner_get_config(pipe_config); 4206 intel_dsc_get_config(pipe_config); 4207 4208 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 4209 DISPLAY_VER(dev_priv) >= 11) 4210 intel_get_transcoder_timings(crtc, pipe_config); 4211 4212 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) 4213 intel_vrr_get_config(pipe_config); 4214 4215 intel_get_pipe_src_size(crtc, pipe_config); 4216 4217 if (IS_HASWELL(dev_priv)) { 4218 u32 tmp = intel_de_read(dev_priv, 4219 TRANSCONF(dev_priv, pipe_config->cpu_transcoder)); 4220 4221 if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW) 4222 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 4223 else 4224 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 4225 } else { 4226 pipe_config->output_format = 4227 bdw_get_pipe_misc_output_format(crtc); 4228 } 4229 4230 pipe_config->sink_format = pipe_config->output_format; 4231 4232 intel_color_get_config(pipe_config); 4233 4234 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe)); 4235 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp); 4236 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 4237 pipe_config->ips_linetime = 4238 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); 4239 4240 if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains, 4241 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) { 4242 if (DISPLAY_VER(dev_priv) >= 9) 4243 skl_scaler_get_config(pipe_config); 4244 else 4245 ilk_get_pfit_config(pipe_config); 4246 } 4247 4248 hsw_ips_get_config(pipe_config); 4249 4250 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 4251 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 
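/* the eDP/DSI transcoders have no pixel multiplier (TRANS_MULT) register to read out */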
4252 pipe_config->pixel_multiplier = 4253 intel_de_read(dev_priv, 4254 TRANS_MULT(dev_priv, pipe_config->cpu_transcoder)) + 1; 4255 } else { 4256 pipe_config->pixel_multiplier = 1; 4257 } 4258 4259 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { 4260 tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder)); 4261 4262 pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; 4263 } else { 4264 /* no idea if this is correct */ 4265 pipe_config->framestart_delay = 1; 4266 } 4267 4268 out: 4269 intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains); 4270 4271 return active; 4272 } 4273 4274 bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) 4275 { 4276 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4277 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4278 4279 if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state)) 4280 return false; 4281 4282 crtc_state->hw.active = true; 4283 4284 intel_crtc_readout_derived_state(crtc_state); 4285 4286 return true; 4287 } 4288 4289 int intel_dotclock_calculate(int link_freq, 4290 const struct intel_link_m_n *m_n) 4291 { 4292 /* 4293 * The calculation for the data clock -> pixel clock is: 4294 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 4295 * But we want to avoid losing precision if possible, so: 4296 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 4297 * 4298 * and for link freq (10 kbit/s units) -> pixel clock it is: 4299 * link_symbol_clock = link_freq * 10 / link_symbol_size 4300 * pixel_clock = (m * link_symbol_clock) / n 4301 * or for more precision: 4302 * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size) 4303 */ 4304 4305 if (!m_n->link_n) 4306 return 0; 4307 4308 return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10), 4309 m_n->link_n * intel_dp_link_symbol_size(link_freq)); 4310 } 4311 4312 int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) 4313 { 4314 int dotclock; 4315 4316 if (intel_crtc_has_dp_encoder(pipe_config)) 4317 dotclock = intel_dotclock_calculate(pipe_config->port_clock, 4318 &pipe_config->dp_m_n); 4319 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) 4320 dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24, 4321 pipe_config->pipe_bpp); 4322 else 4323 dotclock = pipe_config->port_clock; 4324 4325 if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && 4326 !intel_crtc_has_dp_encoder(pipe_config)) 4327 dotclock *= 2; 4328 4329 if (pipe_config->pixel_multiplier) 4330 dotclock /= pipe_config->pixel_multiplier; 4331 4332 return dotclock; 4333 } 4334 4335 /* Returns the currently programmed mode of the given encoder. 
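 * Returns NULL if the encoder is not active or if any allocation or
 * readout fails; otherwise the mode is kzalloc'd and it is the
 * caller's responsibility to kfree() it.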
*/ 4336 struct drm_display_mode * 4337 intel_encoder_current_mode(struct intel_encoder *encoder) 4338 { 4339 struct intel_display *display = to_intel_display(encoder); 4340 struct intel_crtc_state *crtc_state; 4341 struct drm_display_mode *mode; 4342 struct intel_crtc *crtc; 4343 enum pipe pipe; 4344 4345 if (!encoder->get_hw_state(encoder, &pipe)) 4346 return NULL; 4347 4348 crtc = intel_crtc_for_pipe(display, pipe); 4349 4350 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 4351 if (!mode) 4352 return NULL; 4353 4354 crtc_state = intel_crtc_state_alloc(crtc); 4355 if (!crtc_state) { 4356 kfree(mode); 4357 return NULL; 4358 } 4359 4360 if (!intel_crtc_get_pipe_config(crtc_state)) { 4361 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); 4362 kfree(mode); 4363 return NULL; 4364 } 4365 4366 intel_encoder_get_config(encoder, crtc_state); 4367 4368 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode); 4369 4370 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); 4371 4372 return mode; 4373 } 4374 4375 static bool encoders_cloneable(const struct intel_encoder *a, 4376 const struct intel_encoder *b) 4377 { 4378 /* masks could be asymmetric, so check both ways */ 4379 return a == b || (a->cloneable & BIT(b->type) && 4380 b->cloneable & BIT(a->type)); 4381 } 4382 4383 static bool check_single_encoder_cloning(struct intel_atomic_state *state, 4384 struct intel_crtc *crtc, 4385 struct intel_encoder *encoder) 4386 { 4387 struct intel_encoder *source_encoder; 4388 struct drm_connector *connector; 4389 struct drm_connector_state *connector_state; 4390 int i; 4391 4392 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4393 if (connector_state->crtc != &crtc->base) 4394 continue; 4395 4396 source_encoder = 4397 to_intel_encoder(connector_state->best_encoder); 4398 if (!encoders_cloneable(encoder, source_encoder)) 4399 return false; 4400 } 4401 4402 return true; 4403 } 4404 4405 static int icl_add_linked_planes(struct intel_atomic_state *state) 4406 { 4407 struct intel_plane *plane, *linked; 4408 struct intel_plane_state *plane_state, *linked_plane_state; 4409 int i; 4410 4411 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4412 linked = plane_state->planar_linked_plane; 4413 4414 if (!linked) 4415 continue; 4416 4417 linked_plane_state = intel_atomic_get_plane_state(state, linked); 4418 if (IS_ERR(linked_plane_state)) 4419 return PTR_ERR(linked_plane_state); 4420 4421 drm_WARN_ON(state->base.dev, 4422 linked_plane_state->planar_linked_plane != plane); 4423 drm_WARN_ON(state->base.dev, 4424 linked_plane_state->planar_slave == plane_state->planar_slave); 4425 } 4426 4427 return 0; 4428 } 4429 4430 static int icl_check_nv12_planes(struct intel_atomic_state *state, 4431 struct intel_crtc *crtc) 4432 { 4433 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 4434 struct intel_crtc_state *crtc_state = 4435 intel_atomic_get_new_crtc_state(state, crtc); 4436 struct intel_plane *plane, *linked; 4437 struct intel_plane_state *plane_state; 4438 int i; 4439 4440 if (DISPLAY_VER(dev_priv) < 11) 4441 return 0; 4442 4443 /* 4444 * Destroy all old plane links and make the slave plane invisible 4445 * in the crtc_state->active_planes mask. 
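 * The second loop below then links each remaining NV12 plane to a
 * currently unused Y plane and copies the relevant configuration over.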
4446 */ 4447 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4448 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) 4449 continue; 4450 4451 plane_state->planar_linked_plane = NULL; 4452 if (plane_state->planar_slave && !plane_state->uapi.visible) { 4453 crtc_state->enabled_planes &= ~BIT(plane->id); 4454 crtc_state->active_planes &= ~BIT(plane->id); 4455 crtc_state->update_planes |= BIT(plane->id); 4456 crtc_state->data_rate[plane->id] = 0; 4457 crtc_state->rel_data_rate[plane->id] = 0; 4458 } 4459 4460 plane_state->planar_slave = false; 4461 } 4462 4463 if (!crtc_state->nv12_planes) 4464 return 0; 4465 4466 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4467 struct intel_plane_state *linked_state = NULL; 4468 4469 if (plane->pipe != crtc->pipe || 4470 !(crtc_state->nv12_planes & BIT(plane->id))) 4471 continue; 4472 4473 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 4474 if (!icl_is_nv12_y_plane(dev_priv, linked->id)) 4475 continue; 4476 4477 if (crtc_state->active_planes & BIT(linked->id)) 4478 continue; 4479 4480 linked_state = intel_atomic_get_plane_state(state, linked); 4481 if (IS_ERR(linked_state)) 4482 return PTR_ERR(linked_state); 4483 4484 break; 4485 } 4486 4487 if (!linked_state) { 4488 drm_dbg_kms(&dev_priv->drm, 4489 "Need %d free Y planes for planar YUV\n", 4490 hweight8(crtc_state->nv12_planes)); 4491 4492 return -EINVAL; 4493 } 4494 4495 plane_state->planar_linked_plane = linked; 4496 4497 linked_state->planar_slave = true; 4498 linked_state->planar_linked_plane = plane; 4499 crtc_state->enabled_planes |= BIT(linked->id); 4500 crtc_state->active_planes |= BIT(linked->id); 4501 crtc_state->update_planes |= BIT(linked->id); 4502 crtc_state->data_rate[linked->id] = 4503 crtc_state->data_rate_y[plane->id]; 4504 crtc_state->rel_data_rate[linked->id] = 4505 crtc_state->rel_data_rate_y[plane->id]; 4506 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n", 4507 linked->base.name, plane->base.name); 4508 4509 /* Copy parameters to slave plane */ 4510 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; 4511 linked_state->color_ctl = plane_state->color_ctl; 4512 linked_state->view = plane_state->view; 4513 linked_state->decrypt = plane_state->decrypt; 4514 4515 intel_plane_copy_hw_state(linked_state, plane_state); 4516 linked_state->uapi.src = plane_state->uapi.src; 4517 linked_state->uapi.dst = plane_state->uapi.dst; 4518 4519 if (icl_is_hdr_plane(dev_priv, plane->id)) { 4520 if (linked->id == PLANE_7) 4521 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL; 4522 else if (linked->id == PLANE_6) 4523 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL; 4524 else if (linked->id == PLANE_5) 4525 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL; 4526 else if (linked->id == PLANE_4) 4527 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL; 4528 else 4529 MISSING_CASE(linked->id); 4530 } 4531 } 4532 4533 return 0; 4534 } 4535 4536 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) 4537 { 4538 const struct drm_display_mode *pipe_mode = 4539 &crtc_state->hw.pipe_mode; 4540 int linetime_wm; 4541 4542 if (!crtc_state->hw.enable) 4543 return 0; 4544 4545 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4546 pipe_mode->crtc_clock); 4547 4548 return min(linetime_wm, 0x1ff); 4549 } 4550 4551 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, 4552 const struct intel_cdclk_state *cdclk_state) 4553 { 4554 const struct drm_display_mode *pipe_mode = 4555 
&crtc_state->hw.pipe_mode; 4556 int linetime_wm; 4557 4558 if (!crtc_state->hw.enable) 4559 return 0; 4560 4561 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4562 cdclk_state->logical.cdclk); 4563 4564 return min(linetime_wm, 0x1ff); 4565 } 4566 4567 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) 4568 { 4569 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4570 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4571 const struct drm_display_mode *pipe_mode = 4572 &crtc_state->hw.pipe_mode; 4573 int linetime_wm; 4574 4575 if (!crtc_state->hw.enable) 4576 return 0; 4577 4578 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8, 4579 crtc_state->pixel_rate); 4580 4581 /* Display WA #1135: BXT:ALL GLK:ALL */ 4582 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 4583 skl_watermark_ipc_enabled(dev_priv)) 4584 linetime_wm /= 2; 4585 4586 return min(linetime_wm, 0x1ff); 4587 } 4588 4589 static int hsw_compute_linetime_wm(struct intel_atomic_state *state, 4590 struct intel_crtc *crtc) 4591 { 4592 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4593 struct intel_crtc_state *crtc_state = 4594 intel_atomic_get_new_crtc_state(state, crtc); 4595 const struct intel_cdclk_state *cdclk_state; 4596 4597 if (DISPLAY_VER(dev_priv) >= 9) 4598 crtc_state->linetime = skl_linetime_wm(crtc_state); 4599 else 4600 crtc_state->linetime = hsw_linetime_wm(crtc_state); 4601 4602 if (!hsw_crtc_supports_ips(crtc)) 4603 return 0; 4604 4605 cdclk_state = intel_atomic_get_cdclk_state(state); 4606 if (IS_ERR(cdclk_state)) 4607 return PTR_ERR(cdclk_state); 4608 4609 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state, 4610 cdclk_state); 4611 4612 return 0; 4613 } 4614 4615 static int intel_crtc_atomic_check(struct intel_atomic_state *state, 4616 struct intel_crtc *crtc) 4617 { 4618 struct intel_display *display = to_intel_display(crtc); 4619 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4620 struct intel_crtc_state *crtc_state = 4621 intel_atomic_get_new_crtc_state(state, crtc); 4622 int ret; 4623 4624 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) && 4625 intel_crtc_needs_modeset(crtc_state) && 4626 !crtc_state->hw.active) 4627 crtc_state->update_wm_post = true; 4628 4629 if (intel_crtc_needs_modeset(crtc_state)) { 4630 ret = intel_dpll_crtc_get_shared_dpll(state, crtc); 4631 if (ret) 4632 return ret; 4633 } 4634 4635 ret = intel_color_check(state, crtc); 4636 if (ret) 4637 return ret; 4638 4639 ret = intel_wm_compute(state, crtc); 4640 if (ret) { 4641 drm_dbg_kms(&dev_priv->drm, 4642 "[CRTC:%d:%s] watermarks are invalid\n", 4643 crtc->base.base.id, crtc->base.name); 4644 return ret; 4645 } 4646 4647 if (DISPLAY_VER(dev_priv) >= 9) { 4648 if (intel_crtc_needs_modeset(crtc_state) || 4649 intel_crtc_needs_fastset(crtc_state)) { 4650 ret = skl_update_scaler_crtc(crtc_state); 4651 if (ret) 4652 return ret; 4653 } 4654 4655 ret = intel_atomic_setup_scalers(state, crtc); 4656 if (ret) 4657 return ret; 4658 } 4659 4660 if (HAS_IPS(display)) { 4661 ret = hsw_ips_compute_config(state, crtc); 4662 if (ret) 4663 return ret; 4664 } 4665 4666 if (DISPLAY_VER(dev_priv) >= 9 || 4667 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 4668 ret = hsw_compute_linetime_wm(state, crtc); 4669 if (ret) 4670 return ret; 4671 4672 } 4673 4674 ret = intel_psr2_sel_fetch_update(state, crtc); 4675 if (ret) 4676 return ret; 4677 4678 return 0; 4679 } 4680 4681 static int 4682 compute_sink_pipe_bpp(const struct drm_connector_state 
*conn_state, 4683 struct intel_crtc_state *crtc_state) 4684 { 4685 struct drm_connector *connector = conn_state->connector; 4686 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 4687 const struct drm_display_info *info = &connector->display_info; 4688 int bpp; 4689 4690 switch (conn_state->max_bpc) { 4691 case 6 ... 7: 4692 bpp = 6 * 3; 4693 break; 4694 case 8 ... 9: 4695 bpp = 8 * 3; 4696 break; 4697 case 10 ... 11: 4698 bpp = 10 * 3; 4699 break; 4700 case 12 ... 16: 4701 bpp = 12 * 3; 4702 break; 4703 default: 4704 MISSING_CASE(conn_state->max_bpc); 4705 return -EINVAL; 4706 } 4707 4708 if (bpp < crtc_state->pipe_bpp) { 4709 drm_dbg_kms(&i915->drm, 4710 "[CONNECTOR:%d:%s] Limiting display bpp to %d " 4711 "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n", 4712 connector->base.id, connector->name, 4713 bpp, 3 * info->bpc, 4714 3 * conn_state->max_requested_bpc, 4715 crtc_state->pipe_bpp); 4716 4717 crtc_state->pipe_bpp = bpp; 4718 } 4719 4720 return 0; 4721 } 4722 4723 static int 4724 compute_baseline_pipe_bpp(struct intel_atomic_state *state, 4725 struct intel_crtc *crtc) 4726 { 4727 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4728 struct intel_crtc_state *crtc_state = 4729 intel_atomic_get_new_crtc_state(state, crtc); 4730 struct drm_connector *connector; 4731 struct drm_connector_state *connector_state; 4732 int bpp, i; 4733 4734 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 4735 IS_CHERRYVIEW(dev_priv))) 4736 bpp = 10*3; 4737 else if (DISPLAY_VER(dev_priv) >= 5) 4738 bpp = 12*3; 4739 else 4740 bpp = 8*3; 4741 4742 crtc_state->pipe_bpp = bpp; 4743 4744 /* Clamp display bpp to connector max bpp */ 4745 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4746 int ret; 4747 4748 if (connector_state->crtc != &crtc->base) 4749 continue; 4750 4751 ret = compute_sink_pipe_bpp(connector_state, crtc_state); 4752 if (ret) 4753 return ret; 4754 } 4755 4756 return 0; 4757 } 4758 4759 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 4760 { 4761 struct drm_device *dev = state->base.dev; 4762 struct drm_connector *connector; 4763 struct drm_connector_list_iter conn_iter; 4764 unsigned int used_ports = 0; 4765 unsigned int used_mst_ports = 0; 4766 bool ret = true; 4767 4768 /* 4769 * We're going to peek into connector->state, 4770 * hence connection_mutex must be held. 4771 */ 4772 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); 4773 4774 /* 4775 * Walk the connector list instead of the encoder 4776 * list to detect the problem on ddi platforms 4777 * where there's just one encoder per digital port. 
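 * e.g. an SST/HDMI connector and an MST connector on the same port set
 * the same bit in used_ports and used_mst_ports respectively, which
 * the final check below rejects.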
4778 */ 4779 drm_connector_list_iter_begin(dev, &conn_iter); 4780 drm_for_each_connector_iter(connector, &conn_iter) { 4781 struct drm_connector_state *connector_state; 4782 struct intel_encoder *encoder; 4783 4784 connector_state = 4785 drm_atomic_get_new_connector_state(&state->base, 4786 connector); 4787 if (!connector_state) 4788 connector_state = connector->state; 4789 4790 if (!connector_state->best_encoder) 4791 continue; 4792 4793 encoder = to_intel_encoder(connector_state->best_encoder); 4794 4795 drm_WARN_ON(dev, !connector_state->crtc); 4796 4797 switch (encoder->type) { 4798 case INTEL_OUTPUT_DDI: 4799 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) 4800 break; 4801 fallthrough; 4802 case INTEL_OUTPUT_DP: 4803 case INTEL_OUTPUT_HDMI: 4804 case INTEL_OUTPUT_EDP: 4805 /* the same port mustn't appear more than once */ 4806 if (used_ports & BIT(encoder->port)) 4807 ret = false; 4808 4809 used_ports |= BIT(encoder->port); 4810 break; 4811 case INTEL_OUTPUT_DP_MST: 4812 used_mst_ports |= 4813 1 << encoder->port; 4814 break; 4815 default: 4816 break; 4817 } 4818 } 4819 drm_connector_list_iter_end(&conn_iter); 4820 4821 /* can't mix MST and SST/HDMI on the same port */ 4822 if (used_ports & used_mst_ports) 4823 return false; 4824 4825 return ret; 4826 } 4827 4828 static void 4829 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, 4830 struct intel_crtc *crtc) 4831 { 4832 struct intel_crtc_state *crtc_state = 4833 intel_atomic_get_new_crtc_state(state, crtc); 4834 4835 WARN_ON(intel_crtc_is_joiner_secondary(crtc_state)); 4836 4837 drm_property_replace_blob(&crtc_state->hw.degamma_lut, 4838 crtc_state->uapi.degamma_lut); 4839 drm_property_replace_blob(&crtc_state->hw.gamma_lut, 4840 crtc_state->uapi.gamma_lut); 4841 drm_property_replace_blob(&crtc_state->hw.ctm, 4842 crtc_state->uapi.ctm); 4843 } 4844 4845 static void 4846 intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state, 4847 struct intel_crtc *crtc) 4848 { 4849 struct intel_crtc_state *crtc_state = 4850 intel_atomic_get_new_crtc_state(state, crtc); 4851 4852 WARN_ON(intel_crtc_is_joiner_secondary(crtc_state)); 4853 4854 crtc_state->hw.enable = crtc_state->uapi.enable; 4855 crtc_state->hw.active = crtc_state->uapi.active; 4856 drm_mode_copy(&crtc_state->hw.mode, 4857 &crtc_state->uapi.mode); 4858 drm_mode_copy(&crtc_state->hw.adjusted_mode, 4859 &crtc_state->uapi.adjusted_mode); 4860 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter; 4861 4862 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 4863 } 4864 4865 static void 4866 copy_joiner_crtc_state_nomodeset(struct intel_atomic_state *state, 4867 struct intel_crtc *secondary_crtc) 4868 { 4869 struct intel_crtc_state *secondary_crtc_state = 4870 intel_atomic_get_new_crtc_state(state, secondary_crtc); 4871 struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state); 4872 const struct intel_crtc_state *primary_crtc_state = 4873 intel_atomic_get_new_crtc_state(state, primary_crtc); 4874 4875 drm_property_replace_blob(&secondary_crtc_state->hw.degamma_lut, 4876 primary_crtc_state->hw.degamma_lut); 4877 drm_property_replace_blob(&secondary_crtc_state->hw.gamma_lut, 4878 primary_crtc_state->hw.gamma_lut); 4879 drm_property_replace_blob(&secondary_crtc_state->hw.ctm, 4880 primary_crtc_state->hw.ctm); 4881 4882 secondary_crtc_state->uapi.color_mgmt_changed = primary_crtc_state->uapi.color_mgmt_changed; 4883 } 4884 4885 static int 4886 copy_joiner_crtc_state_modeset(struct intel_atomic_state *state, 4887 struct 
intel_crtc *secondary_crtc) 4888 { 4889 struct intel_crtc_state *secondary_crtc_state = 4890 intel_atomic_get_new_crtc_state(state, secondary_crtc); 4891 struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state); 4892 const struct intel_crtc_state *primary_crtc_state = 4893 intel_atomic_get_new_crtc_state(state, primary_crtc); 4894 struct intel_crtc_state *saved_state; 4895 4896 WARN_ON(primary_crtc_state->joiner_pipes != 4897 secondary_crtc_state->joiner_pipes); 4898 4899 saved_state = kmemdup(primary_crtc_state, sizeof(*saved_state), GFP_KERNEL); 4900 if (!saved_state) 4901 return -ENOMEM; 4902 4903 /* preserve some things from the slave's original crtc state */ 4904 saved_state->uapi = secondary_crtc_state->uapi; 4905 saved_state->scaler_state = secondary_crtc_state->scaler_state; 4906 saved_state->shared_dpll = secondary_crtc_state->shared_dpll; 4907 saved_state->crc_enabled = secondary_crtc_state->crc_enabled; 4908 4909 intel_crtc_free_hw_state(secondary_crtc_state); 4910 if (secondary_crtc_state->dp_tunnel_ref.tunnel) 4911 drm_dp_tunnel_ref_put(&secondary_crtc_state->dp_tunnel_ref); 4912 memcpy(secondary_crtc_state, saved_state, sizeof(*secondary_crtc_state)); 4913 kfree(saved_state); 4914 4915 /* Re-init hw state */ 4916 memset(&secondary_crtc_state->hw, 0, sizeof(secondary_crtc_state->hw)); 4917 secondary_crtc_state->hw.enable = primary_crtc_state->hw.enable; 4918 secondary_crtc_state->hw.active = primary_crtc_state->hw.active; 4919 drm_mode_copy(&secondary_crtc_state->hw.mode, 4920 &primary_crtc_state->hw.mode); 4921 drm_mode_copy(&secondary_crtc_state->hw.pipe_mode, 4922 &primary_crtc_state->hw.pipe_mode); 4923 drm_mode_copy(&secondary_crtc_state->hw.adjusted_mode, 4924 &primary_crtc_state->hw.adjusted_mode); 4925 secondary_crtc_state->hw.scaling_filter = primary_crtc_state->hw.scaling_filter; 4926 4927 if (primary_crtc_state->dp_tunnel_ref.tunnel) 4928 drm_dp_tunnel_ref_get(primary_crtc_state->dp_tunnel_ref.tunnel, 4929 &secondary_crtc_state->dp_tunnel_ref); 4930 4931 copy_joiner_crtc_state_nomodeset(state, secondary_crtc); 4932 4933 secondary_crtc_state->uapi.mode_changed = primary_crtc_state->uapi.mode_changed; 4934 secondary_crtc_state->uapi.connectors_changed = primary_crtc_state->uapi.connectors_changed; 4935 secondary_crtc_state->uapi.active_changed = primary_crtc_state->uapi.active_changed; 4936 4937 WARN_ON(primary_crtc_state->joiner_pipes != 4938 secondary_crtc_state->joiner_pipes); 4939 4940 return 0; 4941 } 4942 4943 static int 4944 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, 4945 struct intel_crtc *crtc) 4946 { 4947 struct intel_crtc_state *crtc_state = 4948 intel_atomic_get_new_crtc_state(state, crtc); 4949 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4950 struct intel_crtc_state *saved_state; 4951 4952 saved_state = intel_crtc_state_alloc(crtc); 4953 if (!saved_state) 4954 return -ENOMEM; 4955 4956 /* free the old crtc_state->hw members */ 4957 intel_crtc_free_hw_state(crtc_state); 4958 4959 intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state); 4960 4961 /* FIXME: before the switch to atomic started, a new pipe_config was 4962 * kzalloc'd. Code that depends on any field being zero should be 4963 * fixed, so that the crtc_state can be safely duplicated. For now, 4964 * only fields that are known to not cause problems are preserved. 
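 * Any new crtc state member that must survive this reset has to be
 * added to the explicit copies below.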
*/ 4965 4966 saved_state->uapi = crtc_state->uapi; 4967 saved_state->inherited = crtc_state->inherited; 4968 saved_state->scaler_state = crtc_state->scaler_state; 4969 saved_state->shared_dpll = crtc_state->shared_dpll; 4970 saved_state->dpll_hw_state = crtc_state->dpll_hw_state; 4971 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, 4972 sizeof(saved_state->icl_port_dplls)); 4973 saved_state->crc_enabled = crtc_state->crc_enabled; 4974 if (IS_G4X(dev_priv) || 4975 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4976 saved_state->wm = crtc_state->wm; 4977 4978 memcpy(crtc_state, saved_state, sizeof(*crtc_state)); 4979 kfree(saved_state); 4980 4981 intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc); 4982 4983 return 0; 4984 } 4985 4986 static int 4987 intel_modeset_pipe_config(struct intel_atomic_state *state, 4988 struct intel_crtc *crtc, 4989 const struct intel_link_bw_limits *limits) 4990 { 4991 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4992 struct intel_crtc_state *crtc_state = 4993 intel_atomic_get_new_crtc_state(state, crtc); 4994 struct drm_connector *connector; 4995 struct drm_connector_state *connector_state; 4996 int pipe_src_w, pipe_src_h; 4997 int base_bpp, ret, i; 4998 4999 crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe; 5000 5001 crtc_state->framestart_delay = 1; 5002 5003 /* 5004 * Sanitize sync polarity flags based on requested ones. If neither 5005 * positive nor negative polarity is requested, treat this as meaning 5006 * negative polarity. 5007 */ 5008 if (!(crtc_state->hw.adjusted_mode.flags & 5009 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 5010 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 5011 5012 if (!(crtc_state->hw.adjusted_mode.flags & 5013 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 5014 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 5015 5016 ret = compute_baseline_pipe_bpp(state, crtc); 5017 if (ret) 5018 return ret; 5019 5020 crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe); 5021 crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe]; 5022 5023 if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) { 5024 drm_dbg_kms(&i915->drm, 5025 "[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n", 5026 crtc->base.base.id, crtc->base.name, 5027 FXP_Q4_ARGS(crtc_state->max_link_bpp_x16)); 5028 crtc_state->bw_constrained = true; 5029 } 5030 5031 base_bpp = crtc_state->pipe_bpp; 5032 5033 /* 5034 * Determine the real pipe dimensions. Note that stereo modes can 5035 * increase the actual pipe size due to the frame doubling and 5036 * insertion of additional space for blanks between the frames. This 5037 * is stored in the crtc timings. We use the requested mode to do this 5038 * computation to clearly distinguish it from the adjusted mode, which 5039 * can be changed by the connectors in the below retry loop. 
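 * (e.g. an illustrative 1920x1080 frame packed stereo mode results in
 * a 1920x2205 pipe source size.)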
5040 */ 5041 drm_mode_get_hv_timing(&crtc_state->hw.mode, 5042 &pipe_src_w, &pipe_src_h); 5043 drm_rect_init(&crtc_state->pipe_src, 0, 0, 5044 pipe_src_w, pipe_src_h); 5045 5046 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 5047 struct intel_encoder *encoder = 5048 to_intel_encoder(connector_state->best_encoder); 5049 5050 if (connector_state->crtc != &crtc->base) 5051 continue; 5052 5053 if (!check_single_encoder_cloning(state, crtc, encoder)) { 5054 drm_dbg_kms(&i915->drm, 5055 "[ENCODER:%d:%s] rejecting invalid cloning configuration\n", 5056 encoder->base.base.id, encoder->base.name); 5057 return -EINVAL; 5058 } 5059 5060 /* 5061 * Determine output_types before calling the .compute_config() 5062 * hooks so that the hooks can use this information safely. 5063 */ 5064 if (encoder->compute_output_type) 5065 crtc_state->output_types |= 5066 BIT(encoder->compute_output_type(encoder, crtc_state, 5067 connector_state)); 5068 else 5069 crtc_state->output_types |= BIT(encoder->type); 5070 } 5071 5072 /* Ensure the port clock defaults are reset when retrying. */ 5073 crtc_state->port_clock = 0; 5074 crtc_state->pixel_multiplier = 1; 5075 5076 /* Fill in default crtc timings, allow encoders to overwrite them. */ 5077 drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode, 5078 CRTC_STEREO_DOUBLE); 5079 5080 /* Pass our mode to the connectors and the CRTC to give them a chance to 5081 * adjust it according to limitations or connector properties, and also 5082 * a chance to reject the mode entirely. 5083 */ 5084 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 5085 struct intel_encoder *encoder = 5086 to_intel_encoder(connector_state->best_encoder); 5087 5088 if (connector_state->crtc != &crtc->base) 5089 continue; 5090 5091 ret = encoder->compute_config(encoder, crtc_state, 5092 connector_state); 5093 if (ret == -EDEADLK) 5094 return ret; 5095 if (ret < 0) { 5096 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n", 5097 encoder->base.base.id, encoder->base.name, ret); 5098 return ret; 5099 } 5100 } 5101 5102 /* Set default port clock if not overwritten by the encoder. Needs to be 5103 * done afterwards in case the encoder adjusts the mode. */ 5104 if (!crtc_state->port_clock) 5105 crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock 5106 * crtc_state->pixel_multiplier; 5107 5108 ret = intel_crtc_compute_config(state, crtc); 5109 if (ret == -EDEADLK) 5110 return ret; 5111 if (ret < 0) { 5112 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n", 5113 crtc->base.base.id, crtc->base.name, ret); 5114 return ret; 5115 } 5116 5117 /* Dithering seems to not pass through bits correctly when it should, so 5118 * only enable it on 6bpc panels and when it's not a compliance 5119 * test requesting 6bpc video pattern. 
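 * (dither_force_disable is how the DP compliance code asks for an
 * undithered 6bpc pattern.)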
5120 */ 5121 crtc_state->dither = (crtc_state->pipe_bpp == 6*3) && 5122 !crtc_state->dither_force_disable; 5123 drm_dbg_kms(&i915->drm, 5124 "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 5125 crtc->base.base.id, crtc->base.name, 5126 base_bpp, crtc_state->pipe_bpp, crtc_state->dither); 5127 5128 return 0; 5129 } 5130 5131 static int 5132 intel_modeset_pipe_config_late(struct intel_atomic_state *state, 5133 struct intel_crtc *crtc) 5134 { 5135 struct intel_crtc_state *crtc_state = 5136 intel_atomic_get_new_crtc_state(state, crtc); 5137 struct drm_connector_state *conn_state; 5138 struct drm_connector *connector; 5139 int i; 5140 5141 intel_vrr_compute_config_late(crtc_state); 5142 5143 for_each_new_connector_in_state(&state->base, connector, 5144 conn_state, i) { 5145 struct intel_encoder *encoder = 5146 to_intel_encoder(conn_state->best_encoder); 5147 int ret; 5148 5149 if (conn_state->crtc != &crtc->base || 5150 !encoder->compute_config_late) 5151 continue; 5152 5153 ret = encoder->compute_config_late(encoder, crtc_state, 5154 conn_state); 5155 if (ret) 5156 return ret; 5157 } 5158 5159 return 0; 5160 } 5161 5162 bool intel_fuzzy_clock_check(int clock1, int clock2) 5163 { 5164 int diff; 5165 5166 if (clock1 == clock2) 5167 return true; 5168 5169 if (!clock1 || !clock2) 5170 return false; 5171 5172 diff = abs(clock1 - clock2); 5173 5174 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 5175 return true; 5176 5177 return false; 5178 } 5179 5180 static bool 5181 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 5182 const struct intel_link_m_n *m2_n2) 5183 { 5184 return m_n->tu == m2_n2->tu && 5185 m_n->data_m == m2_n2->data_m && 5186 m_n->data_n == m2_n2->data_n && 5187 m_n->link_m == m2_n2->link_m && 5188 m_n->link_n == m2_n2->link_n; 5189 } 5190 5191 static bool 5192 intel_compare_infoframe(const union hdmi_infoframe *a, 5193 const union hdmi_infoframe *b) 5194 { 5195 return memcmp(a, b, sizeof(*a)) == 0; 5196 } 5197 5198 static bool 5199 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a, 5200 const struct drm_dp_vsc_sdp *b) 5201 { 5202 return a->pixelformat == b->pixelformat && 5203 a->colorimetry == b->colorimetry && 5204 a->bpc == b->bpc && 5205 a->dynamic_range == b->dynamic_range && 5206 a->content_type == b->content_type; 5207 } 5208 5209 static bool 5210 intel_compare_dp_as_sdp(const struct drm_dp_as_sdp *a, 5211 const struct drm_dp_as_sdp *b) 5212 { 5213 return a->vtotal == b->vtotal && 5214 a->target_rr == b->target_rr && 5215 a->duration_incr_ms == b->duration_incr_ms && 5216 a->duration_decr_ms == b->duration_decr_ms && 5217 a->mode == b->mode; 5218 } 5219 5220 static bool 5221 intel_compare_buffer(const u8 *a, const u8 *b, size_t len) 5222 { 5223 return memcmp(a, b, len) == 0; 5224 } 5225 5226 static void __printf(5, 6) 5227 pipe_config_mismatch(struct drm_printer *p, bool fastset, 5228 const struct intel_crtc *crtc, 5229 const char *name, const char *format, ...) 

static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
		       const struct intel_link_m_n *m2_n2)
{
	return m_n->tu == m2_n2->tu &&
		m_n->data_m == m2_n2->data_m &&
		m_n->data_n == m2_n2->data_n &&
		m_n->link_m == m2_n2->link_m &&
		m_n->link_n == m2_n2->link_n;
}

static bool
intel_compare_infoframe(const union hdmi_infoframe *a,
			const union hdmi_infoframe *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

static bool
intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
			 const struct drm_dp_vsc_sdp *b)
{
	return a->pixelformat == b->pixelformat &&
		a->colorimetry == b->colorimetry &&
		a->bpc == b->bpc &&
		a->dynamic_range == b->dynamic_range &&
		a->content_type == b->content_type;
}

static bool
intel_compare_dp_as_sdp(const struct drm_dp_as_sdp *a,
			const struct drm_dp_as_sdp *b)
{
	return a->vtotal == b->vtotal &&
		a->target_rr == b->target_rr &&
		a->duration_incr_ms == b->duration_incr_ms &&
		a->duration_decr_ms == b->duration_decr_ms &&
		a->mode == b->mode;
}

static bool
intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
{
	return memcmp(a, b, len) == 0;
}

static void __printf(5, 6)
pipe_config_mismatch(struct drm_printer *p, bool fastset,
		     const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_printf(p, "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
			   crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_printf(p, "[CRTC:%d:%s] mismatch in %s %pV\n",
			   crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}

static void
pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
			       const struct intel_crtc *crtc,
			       const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	const char *loglevel;

	if (fastset) {
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		loglevel = KERN_DEBUG;
	} else {
		loglevel = KERN_ERR;
	}

	pipe_config_mismatch(p, fastset, crtc, name, "infoframe");

	drm_printf(p, "expected:\n");
	hdmi_infoframe_log(loglevel, i915->drm.dev, a);
	drm_printf(p, "found:\n");
	hdmi_infoframe_log(loglevel, i915->drm.dev, b);
}

static void
pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
				const struct intel_crtc *crtc,
				const char *name,
				const struct drm_dp_vsc_sdp *a,
				const struct drm_dp_vsc_sdp *b)
{
	pipe_config_mismatch(p, fastset, crtc, name, "dp vsc sdp");

	drm_printf(p, "expected:\n");
	drm_dp_vsc_sdp_log(p, a);
	drm_printf(p, "found:\n");
	drm_dp_vsc_sdp_log(p, b);
}

static void
pipe_config_dp_as_sdp_mismatch(struct drm_printer *p, bool fastset,
			       const struct intel_crtc *crtc,
			       const char *name,
			       const struct drm_dp_as_sdp *a,
			       const struct drm_dp_as_sdp *b)
{
	pipe_config_mismatch(p, fastset, crtc, name, "dp as sdp");

	drm_printf(p, "expected:\n");
	drm_dp_as_sdp_log(p, a);
	drm_printf(p, "found:\n");
	drm_dp_as_sdp_log(p, b);
}

/* Returns the length up to and including the last differing byte */
static size_t
memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (a[i] != b[i])
			return i + 1;
	}

	return 0;
}
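
/*
 * Worked example (illustrative only): for a = {0x01, 0x02, 0x03} and
 * b = {0x01, 0xff, 0x03}, the backwards scan in memcmp_diff_len() finds
 * a[2] == b[2] but a[1] != b[1], so it returns 2: dumping the first two
 * bytes of each buffer is enough to show every difference.
 */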

static void
pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset,
			    const struct intel_crtc *crtc,
			    const char *name,
			    const u8 *a, const u8 *b, size_t len)
{
	pipe_config_mismatch(p, fastset, crtc, name, "buffer");

	/* only dump up to the last difference */
	len = memcmp_diff_len(a, b, len);

	drm_print_hex_dump(p, "expected: ", a, len);
	drm_print_hex_dump(p, "found: ", b, len);
}

static void
pipe_config_pll_mismatch(struct drm_printer *p, bool fastset,
			 const struct intel_crtc *crtc,
			 const char *name,
			 const struct intel_dpll_hw_state *a,
			 const struct intel_dpll_hw_state *b)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */

	drm_printf(p, "expected:\n");
	intel_dpll_dump_hw_state(i915, p, a);
	drm_printf(p, "found:\n");
	intel_dpll_dump_hw_state(i915, p, b);
}

static void
pipe_config_cx0pll_mismatch(struct drm_printer *p, bool fastset,
			    const struct intel_crtc *crtc,
			    const char *name,
			    const struct intel_cx0pll_state *a,
			    const struct intel_cx0pll_state *b)
{
	struct intel_display *display = to_intel_display(crtc);
	const char *chipname = a->use_c10 ? "C10" : "C20";

	pipe_config_mismatch(p, fastset, crtc, name, chipname);

	drm_printf(p, "expected:\n");
	intel_cx0pll_dump_hw_state(display, a);
	drm_printf(p, "found:\n");
	intel_cx0pll_dump_hw_state(display, b);
}

static bool allow_vblank_delay_fastset(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(old_crtc_state);

	/*
	 * Allow fastboot to fix up vblank delay (handled via LRR
	 * codepaths), a bit dodgy as the registers aren't
	 * double buffered but seems to be working more or less...
	 */
	return HAS_LRR(display) && old_crtc_state->inherited &&
		!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI);
}

bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state *pipe_config,
			  bool fastset)
{
	struct intel_display *display = to_intel_display(current_config);
	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_printer p;
	bool ret = true;

	if (fastset)
		p = drm_dbg_printer(&dev_priv->drm, DRM_UT_KMS, NULL);
	else
		p = drm_err_printer(&dev_priv->drm, NULL);

#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_LLI(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected %lli, found %lli)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
				 __stringify(name) " is not bool"); \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected %s, found %s)", \
				     str_yes_no(current_config->name), \
				     str_yes_no(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name)) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected tu %i data %i/%i link %i/%i, " \
				     "found tu %i, data %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.data_m, \
				     current_config->name.data_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.data_m, \
				     pipe_config->name.data_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_PLL(name) do { \
	if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \
					 &pipe_config->name)) { \
		pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \
					 &current_config->name, \
					 &pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_PLL_CX0(name) do { \
	if (!intel_cx0pll_compare_hw_state(&current_config->name, \
					   &pipe_config->name)) { \
		pipe_config_cx0pll_mismatch(&p, fastset, crtc, __stringify(name), \
					    &current_config->name, \
					    &pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_TIMINGS(name) do { \
	PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
	PIPE_CONF_CHECK_I(name.crtc_htotal); \
	PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
	PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
	PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
	PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
	PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
	if (!fastset || !allow_vblank_delay_fastset(current_config)) \
		PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
	PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
	PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
	if (!fastset || !pipe_config->update_lrr) { \
		PIPE_CONF_CHECK_I(name.crtc_vtotal); \
		PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
	} \
} while (0)

#define PIPE_CONF_CHECK_RECT(name) do { \
	PIPE_CONF_CHECK_I(name.x1); \
	PIPE_CONF_CHECK_I(name.x2); \
	PIPE_CONF_CHECK_I(name.y1); \
	PIPE_CONF_CHECK_I(name.y2); \
} while (0)

#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(&p, fastset, crtc, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
	if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
				      &pipe_config->infoframes.name)) { \
		pipe_config_dp_vsc_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
						&current_config->infoframes.name, \
						&pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \
	if (!intel_compare_dp_as_sdp(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_dp_as_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_BUFFER(name, len) do { \
	BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
	BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
	if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
		pipe_config_buffer_mismatch(&p, fastset, crtc, __stringify(name), \
					    current_config->name, \
					    pipe_config->name, \
					    (len)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \
	if (current_config->gamma_mode == pipe_config->gamma_mode && \
	    !intel_color_lut_equal(current_config, \
				   current_config->lut, pipe_config->lut, \
				   is_pre_csc_lut)) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(lut), \
				     "hw_state doesn't match sw_state"); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_CSC(name) do { \
	PIPE_CONF_CHECK_X(name.preoff[0]); \
	PIPE_CONF_CHECK_X(name.preoff[1]); \
	PIPE_CONF_CHECK_X(name.preoff[2]); \
	PIPE_CONF_CHECK_X(name.coeff[0]); \
	PIPE_CONF_CHECK_X(name.coeff[1]); \
	PIPE_CONF_CHECK_X(name.coeff[2]); \
	PIPE_CONF_CHECK_X(name.coeff[3]); \
	PIPE_CONF_CHECK_X(name.coeff[4]); \
	PIPE_CONF_CHECK_X(name.coeff[5]); \
	PIPE_CONF_CHECK_X(name.coeff[6]); \
	PIPE_CONF_CHECK_X(name.coeff[7]); \
	PIPE_CONF_CHECK_X(name.coeff[8]); \
	PIPE_CONF_CHECK_X(name.postoff[0]); \
	PIPE_CONF_CHECK_X(name.postoff[1]); \
	PIPE_CONF_CHECK_X(name.postoff[2]); \
} while (0)

#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))
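
/*
 * Illustrative expansion (not actual code): a check such as
 * PIPE_CONF_CHECK_I(lane_count) expands roughly to
 *
 *	if (current_config->lane_count != pipe_config->lane_count) {
 *		pipe_config_mismatch(&p, fastset, crtc, "lane_count",
 *				     "(expected %i, found %i)",
 *				     current_config->lane_count,
 *				     pipe_config->lane_count);
 *		ret = false;
 *	}
 *
 * (the BUILD_BUG_ON_MSG() type check is omitted here for brevity). Every
 * mismatch is logged and all remaining checks still run; ret only latches
 * the overall result.
 */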

	PIPE_CONF_CHECK_BOOL(hw.enable);
	PIPE_CONF_CHECK_BOOL(hw.active);

	PIPE_CONF_CHECK_I(cpu_transcoder);
	PIPE_CONF_CHECK_I(mst_master_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (HAS_DOUBLE_BUFFERED_M_N(display)) {
		if (!fastset || !pipe_config->update_m_n)
			PIPE_CONF_CHECK_M_N(dp_m_n);
	} else {
		PIPE_CONF_CHECK_M_N(dp_m_n);
		PIPE_CONF_CHECK_M_N(dp_m2_n2);
	}

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(framestart_delay);
	PIPE_CONF_CHECK_I(msa_timing_delay);

	PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
	PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);

	PIPE_CONF_CHECK_I(pixel_multiplier);

	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	PIPE_CONF_CHECK_BOOL(enhanced_framing);
	PIPE_CONF_CHECK_BOOL(fec_enable);

	if (!fastset) {
		PIPE_CONF_CHECK_BOOL(has_audio);
		PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (DISPLAY_VER(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	if (!fastset) {
		PIPE_CONF_CHECK_RECT(pipe_src);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		PIPE_CONF_CHECK_RECT(pch_pfit.dst);

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_I(pixel_rate);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (IS_CHERRYVIEW(dev_priv))
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);
		PIPE_CONF_CHECK_BOOL(wgc_enable);

		PIPE_CONF_CHECK_I(linetime);
		PIPE_CONF_CHECK_I(ips_linetime);

		PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true);
		PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false);

		PIPE_CONF_CHECK_CSC(csc);
		PIPE_CONF_CHECK_CSC(output_csc);
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	if (dev_priv->display.dpll.mgr)
		PIPE_CONF_CHECK_P(shared_dpll);

	/* FIXME convert everything over the dpll_mgr */
	if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv))
		PIPE_CONF_CHECK_PLL(dpll_hw_state);

	/* FIXME convert MTL+ platforms over to dpll_mgr */
	if (DISPLAY_VER(dev_priv) >= 14)
		PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	if (!fastset || !pipe_config->update_m_n) {
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
	}
	PIPE_CONF_CHECK_I(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

	if (current_config->has_psr || pipe_config->has_psr)
		PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
					    ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
	else
		PIPE_CONF_CHECK_X(infoframes.enable);

	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	if (!fastset)
		PIPE_CONF_CHECK_INFOFRAME(drm);
	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
	PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);

	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
	PIPE_CONF_CHECK_I(master_transcoder);
	PIPE_CONF_CHECK_X(joiner_pipes);

	PIPE_CONF_CHECK_BOOL(dsc.config.block_pred_enable);
	PIPE_CONF_CHECK_BOOL(dsc.config.convert_rgb);
	PIPE_CONF_CHECK_BOOL(dsc.config.simple_422);
	PIPE_CONF_CHECK_BOOL(dsc.config.native_422);
	PIPE_CONF_CHECK_BOOL(dsc.config.native_420);
	PIPE_CONF_CHECK_BOOL(dsc.config.vbr_enable);
	PIPE_CONF_CHECK_I(dsc.config.line_buf_depth);
	PIPE_CONF_CHECK_I(dsc.config.bits_per_component);
	PIPE_CONF_CHECK_I(dsc.config.pic_width);
	PIPE_CONF_CHECK_I(dsc.config.pic_height);
	PIPE_CONF_CHECK_I(dsc.config.slice_width);
	PIPE_CONF_CHECK_I(dsc.config.slice_height);
	PIPE_CONF_CHECK_I(dsc.config.initial_dec_delay);
	PIPE_CONF_CHECK_I(dsc.config.initial_xmit_delay);
	PIPE_CONF_CHECK_I(dsc.config.scale_decrement_interval);
	PIPE_CONF_CHECK_I(dsc.config.scale_increment_interval);
	PIPE_CONF_CHECK_I(dsc.config.initial_scale_value);
	PIPE_CONF_CHECK_I(dsc.config.first_line_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.flatness_min_qp);
	PIPE_CONF_CHECK_I(dsc.config.flatness_max_qp);
	PIPE_CONF_CHECK_I(dsc.config.slice_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.nfl_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.initial_offset);
	PIPE_CONF_CHECK_I(dsc.config.final_offset);
	PIPE_CONF_CHECK_I(dsc.config.rc_model_size);
	PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit0);
	PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit1);
	PIPE_CONF_CHECK_I(dsc.config.slice_chunk_size);
	PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);

	PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
	PIPE_CONF_CHECK_I(dsc.num_streams);
	PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);

	PIPE_CONF_CHECK_BOOL(splitter.enable);
	PIPE_CONF_CHECK_I(splitter.link_count);
	PIPE_CONF_CHECK_I(splitter.pixel_overlap);

	if (!fastset) {
		PIPE_CONF_CHECK_BOOL(vrr.enable);
		PIPE_CONF_CHECK_I(vrr.vmin);
		PIPE_CONF_CHECK_I(vrr.vmax);
		PIPE_CONF_CHECK_I(vrr.flipline);
		PIPE_CONF_CHECK_I(vrr.pipeline_full);
		PIPE_CONF_CHECK_I(vrr.guardband);
		PIPE_CONF_CHECK_I(vrr.vsync_start);
		PIPE_CONF_CHECK_I(vrr.vsync_end);
		PIPE_CONF_CHECK_LLI(cmrr.cmrr_m);
		PIPE_CONF_CHECK_LLI(cmrr.cmrr_n);
		PIPE_CONF_CHECK_BOOL(cmrr.enable);
	}

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_LLI
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_CHECK_TIMINGS
#undef PIPE_CONF_CHECK_RECT
#undef PIPE_CONF_QUIRK

	return ret;
}
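
/*
 * Usage note (illustrative): with fastset == true the comparison above is
 * advisory - a mismatch is logged at debug level and merely demotes the
 * commit to a full modeset (see intel_crtc_check_fastset() below); with
 * fastset == false it acts as the state verifier, where any mismatch
 * indicates a driver bug and is logged as an error.
 */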

static void
intel_verify_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane,
					  plane_state, i)
		assert_plane(plane, plane_state->planar_slave ||
			     plane_state->uapi.visible);
}

static int intel_modeset_pipe(struct intel_atomic_state *state,
			      struct intel_crtc_state *crtc_state,
			      const char *reason)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int ret;

	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
		    crtc->base.base.id, crtc->base.name, reason);

	ret = drm_atomic_add_affected_connectors(&state->base,
						 &crtc->base);
	if (ret)
		return ret;

	ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc);
	if (ret)
		return ret;

	ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
	if (ret)
		return ret;

	ret = intel_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	crtc_state->uapi.mode_changed = true;

	return 0;
}

/**
 * intel_modeset_pipes_in_mask_early - force a full modeset on a set of pipes
 * @state: intel atomic state
 * @reason: the reason for the full modeset
 * @mask: mask of pipes to modeset
 *
 * Add pipes in @mask to @state and force a full modeset on the enabled ones
 * for the reason described in @reason.
 * This function can only be called before new plane states are computed.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
				      const char *reason, u8 mask)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mask) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		ret = intel_modeset_pipe(state, crtc_state, reason);
		if (ret)
			return ret;
	}

	return 0;
}

static void
intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.mode_changed = true;

	crtc_state->update_pipe = false;
	crtc_state->update_m_n = false;
	crtc_state->update_lrr = false;
}

/**
 * intel_modeset_all_pipes_late - force a full modeset on all pipes
 * @state: intel atomic state
 * @reason: the reason for the full modeset
 *
 * Add all pipes to @state and force a full modeset on the active ones
 * for the reason described in @reason.
 * This function can only be called after the new plane states have been
 * computed.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
				 const char *reason)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		ret = intel_modeset_pipe(state, crtc_state, reason);
		if (ret)
			return ret;

		intel_crtc_flag_modeset(crtc_state);

		crtc_state->update_planes |= crtc_state->active_planes;
		crtc_state->async_flip_planes = 0;
		crtc_state->do_async_flip = false;
	}

	return 0;
}
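
/*
 * Usage sketch (hypothetical caller, shown only for illustration): global
 * resource code typically forces the modeset when a shared resource change
 * cannot be applied to a subset of pipes, e.g.:
 *
 *	ret = intel_modeset_all_pipes_late(state, "CDCLK change");
 *	if (ret)
 *		return ret;
 */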

int intel_modeset_commit_pipes(struct drm_i915_private *i915,
			       u8 pipe_mask,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct intel_crtc *crtc;
	int ret;

	state = drm_atomic_state_alloc(&i915->drm);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	to_intel_atomic_state(state)->internal = true;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		crtc_state->uapi.connectors_changed = true;
	}

	ret = drm_atomic_commit(state);
out:
	drm_atomic_state_put(state);

	return ret;
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

u8 intel_calc_active_pipes(struct intel_atomic_state *state,
			   u8 active_pipes)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);
		else
			active_pipes &= ~BIT(crtc->pipe);
	}

	return active_pipes;
}
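
/*
 * Worked example (illustrative): starting from
 * active_pipes = BIT(PIPE_A) | BIT(PIPE_B), a state that disables pipe B
 * and enables pipe C yields BIT(PIPE_A) | BIT(PIPE_C). Pipes that are not
 * part of @state keep their value from the passed-in mask.
 */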

static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	state->modeset = true;

	if (IS_HASWELL(dev_priv))
		return hsw_mode_set_planes_workaround(state);

	return 0;
}

static bool lrr_params_changed(const struct drm_display_mode *old_adjusted_mode,
			       const struct drm_display_mode *new_adjusted_mode)
{
	return old_adjusted_mode->crtc_vblank_start != new_adjusted_mode->crtc_vblank_start ||
		old_adjusted_mode->crtc_vblank_end != new_adjusted_mode->crtc_vblank_end ||
		old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal;
}

static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	/* only allow LRR when the timings stay within the VRR range */
	if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range)
		new_crtc_state->update_lrr = false;

	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) {
		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
			    crtc->base.base.id, crtc->base.name);
	} else {
		if (allow_vblank_delay_fastset(old_crtc_state))
			new_crtc_state->update_lrr = true;
		new_crtc_state->uapi.mode_changed = false;
	}

	if (intel_compare_link_m_n(&old_crtc_state->dp_m_n,
				   &new_crtc_state->dp_m_n))
		new_crtc_state->update_m_n = false;

	if (!lrr_params_changed(&old_crtc_state->hw.adjusted_mode,
				&new_crtc_state->hw.adjusted_mode))
		new_crtc_state->update_lrr = false;

	if (intel_crtc_needs_modeset(new_crtc_state))
		intel_crtc_flag_modeset(new_crtc_state);
	else
		new_crtc_state->update_pipe = true;
}

static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
					  struct intel_crtc *crtc,
					  u8 plane_ids_mask)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;

		if ((plane_ids_mask & BIT(plane->id)) == 0)
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	return intel_crtc_add_planes_to_state(state, crtc,
					      old_crtc_state->enabled_planes |
					      new_crtc_state->enabled_planes);
}

static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
{
	/* See {hsw,vlv,ivb}_plane_ratio() */
	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		IS_IVYBRIDGE(dev_priv);
}
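
/*
 * Worked example (illustrative): on the platforms above the minimum cdclk
 * depends on how many planes are active, not on which ones. In
 * intel_atomic_check_planes() below, old_active_planes = 0b0110 vs.
 * new_active_planes = 0b0011 compare as hweight8() 2 == 2 and nothing is
 * added; 0b0110 vs. 0b0111 (2 vs. 3) pulls the remaining active planes
 * into the state so their minimum cdclk is recomputed.
 */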

static int intel_crtc_add_joiner_planes(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc *other)
{
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	u8 plane_ids = 0;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			plane_ids |= BIT(plane->id);
	}

	return intel_crtc_add_planes_to_state(state, other, plane_ids);
}

static int intel_joiner_add_affected_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *other;

		for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
						 crtc_state->joiner_pipes) {
			int ret;

			if (crtc == other)
				continue;

			ret = intel_crtc_add_joiner_planes(state, crtc, other);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	ret = intel_joiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(state, crtc);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}

static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state __maybe_unused *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
		int ret;

		ret = intel_crtc_atomic_check(state, crtc);
		if (ret) {
			drm_dbg_atomic(&i915->drm,
				       "[CRTC:%d:%s] atomic driver check failed\n",
				       crtc->base.base.id, crtc->base.name);
			return ret;
		}
	}

	return 0;
}

static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
					       u8 transcoders)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}

static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
				     u8 pipes)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    pipes & BIT(crtc->pipe) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}

static int intel_atomic_check_joiner(struct intel_atomic_state *state,
				     struct intel_crtc *primary_crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *primary_crtc_state =
		intel_atomic_get_new_crtc_state(state, primary_crtc);
	struct intel_crtc *secondary_crtc;

	if (!primary_crtc_state->joiner_pipes)
		return 0;

	/* sanity check */
	if (drm_WARN_ON(&i915->drm,
			primary_crtc->pipe != joiner_primary_pipe(primary_crtc_state)))
		return -EINVAL;

	if (primary_crtc_state->joiner_pipes & ~joiner_pipes(i915)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Cannot act as joiner primary "
			    "(need 0x%x as pipes, only 0x%x possible)\n",
			    primary_crtc->base.base.id, primary_crtc->base.name,
			    primary_crtc_state->joiner_pipes, joiner_pipes(i915));
		return -EINVAL;
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
					 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
		struct intel_crtc_state *secondary_crtc_state;
		int ret;

		secondary_crtc_state = intel_atomic_get_crtc_state(&state->base, secondary_crtc);
		if (IS_ERR(secondary_crtc_state))
			return PTR_ERR(secondary_crtc_state);

		/* primary being enabled, secondary was already configured? */
		if (secondary_crtc_state->uapi.enable) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] secondary is enabled as normal CRTC, but "
				    "[CRTC:%d:%s] claiming this CRTC for joiner.\n",
				    secondary_crtc->base.base.id, secondary_crtc->base.name,
				    primary_crtc->base.base.id, primary_crtc->base.name);
			return -EINVAL;
		}

		/*
		 * The state copy logic assumes the primary crtc gets processed
		 * before the secondary crtc during the main compute_config loop.
		 * This works because the crtcs are created in pipe order,
		 * and the hardware requires primary pipe < secondary pipe as well.
		 * Should that change we need to rethink the logic.
		 */
		if (WARN_ON(drm_crtc_index(&primary_crtc->base) >
			    drm_crtc_index(&secondary_crtc->base)))
			return -EINVAL;

		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Used as secondary for joiner primary [CRTC:%d:%s]\n",
			    secondary_crtc->base.base.id, secondary_crtc->base.name,
			    primary_crtc->base.base.id, primary_crtc->base.name);

		secondary_crtc_state->joiner_pipes =
			primary_crtc_state->joiner_pipes;

		ret = copy_joiner_crtc_state_modeset(state, secondary_crtc);
		if (ret)
			return ret;
	}

	return 0;
}

static void kill_joiner_secondaries(struct intel_atomic_state *state,
				    struct intel_crtc *primary_crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *primary_crtc_state =
		intel_atomic_get_new_crtc_state(state, primary_crtc);
	struct intel_crtc *secondary_crtc;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
					 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
		struct intel_crtc_state *secondary_crtc_state =
			intel_atomic_get_new_crtc_state(state, secondary_crtc);

		secondary_crtc_state->joiner_pipes = 0;

		intel_crtc_copy_uapi_to_hw_state_modeset(state, secondary_crtc);
	}

	primary_crtc_state->joiner_pipes = 0;
}

/**
 * DOC: asynchronous flip implementation
 *
 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
 * Correspondingly, support is currently added for the primary plane only.
 *
 * Async flip can only change the plane surface address, so anything else
 * changing is rejected from the intel_async_flip_check_hw() function.
 * Once this check is cleared, the flip done interrupt is enabled using
 * the intel_crtc_enable_flip_done() function.
 *
 * As soon as the surface address register is written, the flip done interrupt
 * is generated and the requested events are sent to userspace in the interrupt
 * handler itself. The timestamp and sequence sent during the flip done event
 * correspond to the last vblank and have no relation to the actual time when
 * the flip done event was sent.
 */
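
/*
 * Illustrative userspace sequence (libdrm, not kernel code; shown only to
 * document the uAPI described above):
 *
 *	ret = drmModePageFlip(fd, crtc_id, fb_id,
 *			      DRM_MODE_PAGE_FLIP_EVENT |
 *			      DRM_MODE_PAGE_FLIP_ASYNC, user_data);
 *
 * Only the framebuffer (and thus the surface address) may differ from what
 * is currently being scanned out; any other change is rejected by
 * intel_async_flip_check_hw() with -EINVAL.
 */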

static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	if (!new_crtc_state->uapi.async_flip)
		return 0;

	if (!new_crtc_state->uapi.active) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] not active\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] modeset required\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	/*
	 * FIXME: joiner+async flip is busted currently.
	 * Remove this check once the issues are fixed.
	 */
	if (new_crtc_state->joiner_pipes) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] async flip disallowed with joiner\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * TODO: Async flip is only supported through the page flip
		 * IOCTL as of now, so support is currently added for the
		 * primary plane only. Support for other planes on platforms
		 * that support this (vlv/chv and icl+) should be added when
		 * async flip is enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] async flip not supported\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] no old or new framebuffer\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}
	}

	return 0;
}

static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	int i;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->uapi.async_flip)
		return 0;

	if (!new_crtc_state->hw.active) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] not active\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] modeset required\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Active planes cannot be in async flip\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * Only async flip capable planes should be in the state
		 * if we're really about to ask the hardware to perform
		 * an async flip. We should never get this far otherwise.
		 */
		if (drm_WARN_ON(&i915->drm,
				new_crtc_state->do_async_flip && !plane->async_flip))
			return -EINVAL;

		/*
		 * Only check async flip capable planes; other planes
		 * may be involved in the initial commit due to
		 * the wm0/ddb optimization.
		 *
		 * TODO: maybe we should track which planes were actually
		 * requested to do the async flip...
		 */
		if (!plane->async_flip)
			continue;

		if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->modifier)) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
				    plane->base.base.id, plane->base.name,
				    new_plane_state->hw.fb->modifier);
			return -EINVAL;
		}

		if (intel_format_info_is_yuv_semiplanar(new_plane_state->hw.fb->format,
							new_plane_state->hw.fb->modifier)) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Planar formats do not support async flips\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		/*
		 * We turn the first async flip request into a sync flip
		 * so that we can reconfigure the plane (e.g. change modifier).
		 */
		if (!new_crtc_state->do_async_flip)
			continue;

		if (old_plane_state->view.color_plane[0].mapping_stride !=
		    new_plane_state->view.color_plane[0].mapping_stride) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (skl_plane_aux_dist(old_plane_state, 0) !=
		    skl_plane_aux_dist(new_plane_state, 0)) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] AUX_DIST cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Alpha value cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		/* plane decryption is allowed to change only in synchronous flips */
		if (old_plane_state->decrypt != new_plane_state->decrypt) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}
	}

	return 0;
}

static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	u8 affected_pipes = 0;
	u8 modeset_pipes = 0;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		affected_pipes |= crtc_state->joiner_pipes;
		if (intel_crtc_needs_modeset(crtc_state))
			modeset_pipes |= crtc_state->joiner_pipes;
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
		int ret;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old joiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    intel_crtc_is_joiner_primary(crtc_state))
			kill_joiner_secondaries(state, crtc);
	}

	return 0;
}

static int intel_atomic_check_config(struct intel_atomic_state *state,
				     struct intel_link_bw_limits *limits,
				     enum pipe *failed_pipe)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int ret;
	int i;

	*failed_pipe = INVALID_PIPE;

	ret = intel_joiner_add_affected_crtcs(state);
	if (ret)
		return ret;

	ret = intel_fdi_add_affected_crtcs(state);
	if (ret)
		return ret;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			if (intel_crtc_is_joiner_secondary(new_crtc_state))
				copy_joiner_crtc_state_nomodeset(state, crtc);
			else
				intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
			continue;
		}

		if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
			continue;

		ret = intel_crtc_prepare_cleared_state(state, crtc);
		if (ret)
			goto fail;

		if (!new_crtc_state->hw.enable)
			continue;

		ret = intel_modeset_pipe_config(state, crtc, limits);
		if (ret)
			goto fail;
	}

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
			continue;

		if (!new_crtc_state->hw.enable)
			continue;

		ret = intel_modeset_pipe_config_late(state, crtc);
		if (ret)
			goto fail;
	}

fail:
	if (ret)
		*failed_pipe = crtc->pipe;

	return ret;
}

static int intel_atomic_check_config_and_link(struct intel_atomic_state *state)
{
	struct intel_link_bw_limits new_limits;
	struct intel_link_bw_limits old_limits;
	int ret;

	intel_link_bw_init_limits(state, &new_limits);
	old_limits = new_limits;

	while (true) {
		enum pipe failed_pipe;

		ret = intel_atomic_check_config(state, &new_limits,
						&failed_pipe);
		if (ret) {
			/*
			 * The bpp limit for a pipe is below the minimum it supports, set the
			 * limit to the minimum and recalculate the config.
			 */
			if (ret == -EINVAL &&
			    intel_link_bw_set_bpp_limit_for_pipe(state,
								 &old_limits,
								 &new_limits,
								 failed_pipe))
				continue;

			break;
		}

		old_limits = new_limits;

		ret = intel_link_bw_atomic_check(state, &new_limits);
		if (ret != -EAGAIN)
			break;
	}

	return ret;
}
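
/*
 * Convergence note (illustrative, with hypothetical numbers): each pass of
 * the loop above either succeeds or lowers the bpp limit of the failing
 * pipe, so the loop terminates. E.g. if a pipe fails its link bandwidth
 * check at a 30 bpp limit, intel_link_bw_set_bpp_limit_for_pipe() reduces
 * the limit (say, to 24 bpp) and the whole configuration is recomputed
 * until every pipe fits or a limit cannot be lowered further.
 */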

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @_state: state to validate
 */
int intel_atomic_check(struct drm_device *dev,
		       struct drm_atomic_state *_state)
{
	struct intel_display *display = to_intel_display(dev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;
	bool any_ms = false;

	if (!intel_display_driver_check_access(display))
		return -ENODEV;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * The crtc's state is no longer considered to be inherited
		 * after the first userspace/client initiated commit.
		 */
		if (!state->internal)
			new_crtc_state->inherited = false;

		if (new_crtc_state->inherited != old_crtc_state->inherited)
			new_crtc_state->uapi.mode_changed = true;

		if (new_crtc_state->uapi.scaling_filter !=
		    old_crtc_state->uapi.scaling_filter)
			new_crtc_state->uapi.mode_changed = true;
	}

	intel_vrr_check_modeset(state);

	ret = drm_atomic_helper_check_modeset(dev, &state->base);
	if (ret)
		goto fail;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = intel_async_flip_check_uapi(state, crtc);
		if (ret)
			return ret;
	}

	ret = intel_atomic_check_config_and_link(state);
	if (ret)
		goto fail;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (intel_crtc_is_joiner_secondary(new_crtc_state)) {
			drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
			continue;
		}

		ret = intel_atomic_check_joiner(state, crtc);
		if (ret)
			goto fail;
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		intel_joiner_adjust_pipe_src(new_crtc_state);

		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
	}

	/*
	 * Check if fastset is allowed by external dependencies like other
	 * pipes and transcoders.
	 *
	 * Right now it only forces a full modeset when the MST master
	 * transcoder did not change but the pipe of the master transcoder
	 * needs a full modeset, in which case all slaves also need a full
	 * modeset. Similarly, in case of port synced crtcs, if one of the
	 * synced crtcs needs a full modeset, all the other synced crtcs
	 * are forced into a full modeset as well.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (intel_dp_mst_crtc_needs_modeset(state, crtc))
			intel_crtc_flag_modeset(new_crtc_state);

		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
			enum transcoder master = new_crtc_state->mst_master_transcoder;

			if (intel_cpu_transcoders_need_modeset(state, BIT(master)))
				intel_crtc_flag_modeset(new_crtc_state);
		}

		if (is_trans_port_sync_mode(new_crtc_state)) {
			u8 trans = new_crtc_state->sync_mode_slaves_mask;

			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
				trans |= BIT(new_crtc_state->master_transcoder);

			if (intel_cpu_transcoders_need_modeset(state, trans))
				intel_crtc_flag_modeset(new_crtc_state);
		}

		if (new_crtc_state->joiner_pipes) {
			if (intel_pipes_need_modeset(state, new_crtc_state->joiner_pipes))
				intel_crtc_flag_modeset(new_crtc_state);
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		any_ms = true;

		intel_release_shared_dplls(state, crtc);
	}

	if (any_ms && !check_digital_port_conflicts(state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "rejecting conflicting digital port configuration\n");
		ret = -EINVAL;
		goto fail;
	}

	ret = intel_atomic_check_planes(state);
	if (ret)
		goto fail;

	ret = intel_compute_global_watermarks(state);
	if (ret)
		goto fail;

	ret = intel_bw_atomic_check(state);
	if (ret)
		goto fail;

	ret = intel_cdclk_atomic_check(state, &any_ms);
	if (ret)
		goto fail;

	if (intel_any_crtc_needs_modeset(state))
		any_ms = true;

	if (any_ms) {
		ret = intel_modeset_checks(state);
		if (ret)
			goto fail;

		ret = intel_modeset_calc_cdclk(state);
		if (ret)
			return ret;
	}

	ret = intel_pmdemand_atomic_check(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check_crtcs(state);
	if (ret)
		goto fail;

	ret = intel_fbc_atomic_check(state);
	if (ret)
		goto fail;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		intel_color_assert_luts(new_crtc_state);

		ret = intel_async_flip_check_hw(state, crtc);
		if (ret)
			goto fail;

		/* Either full modeset or fastset (or neither), never both */
		drm_WARN_ON(&dev_priv->drm,
			    intel_crtc_needs_modeset(new_crtc_state) &&
			    intel_crtc_needs_fastset(new_crtc_state));

		if (!intel_crtc_needs_modeset(new_crtc_state) &&
		    !intel_crtc_needs_fastset(new_crtc_state))
			continue;

		intel_crtc_state_dump(new_crtc_state, state,
				      intel_crtc_needs_modeset(new_crtc_state) ?
				      "modeset" : "fastset");
	}

	return 0;

fail:
	if (ret == -EDEADLK)
		return ret;

	/*
	 * FIXME would probably be nice to know which crtc specifically
	 * caused the failure, in cases where we can pinpoint it.
	 */
7039 */ 7040 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7041 new_crtc_state, i) 7042 intel_crtc_state_dump(new_crtc_state, state, "failed"); 7043 7044 return ret; 7045 } 7046 7047 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 7048 { 7049 int ret; 7050 7051 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base); 7052 if (ret < 0) 7053 return ret; 7054 7055 return 0; 7056 } 7057 7058 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 7059 struct intel_crtc_state *crtc_state) 7060 { 7061 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7062 7063 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes) 7064 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 7065 7066 if (crtc_state->has_pch_encoder) { 7067 enum pipe pch_transcoder = 7068 intel_crtc_pch_transcoder(crtc); 7069 7070 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 7071 } 7072 } 7073 7074 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, 7075 const struct intel_crtc_state *new_crtc_state) 7076 { 7077 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 7078 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7079 7080 /* 7081 * Update pipe size and adjust fitter if needed: the reason for this is 7082 * that in compute_mode_changes we check the native mode (not the pfit 7083 * mode) to see if we can flip rather than do a full mode set. In the 7084 * fastboot case, we'll flip, but if we don't update the pipesrc and 7085 * pfit state, we'll end up with a big fb scanned out into the wrong 7086 * sized surface. 7087 */ 7088 intel_set_pipe_src_size(new_crtc_state); 7089 7090 /* on skylake this is done by detaching scalers */ 7091 if (DISPLAY_VER(dev_priv) >= 9) { 7092 if (new_crtc_state->pch_pfit.enabled) 7093 skl_pfit_enable(new_crtc_state); 7094 } else if (HAS_PCH_SPLIT(dev_priv)) { 7095 if (new_crtc_state->pch_pfit.enabled) 7096 ilk_pfit_enable(new_crtc_state); 7097 else if (old_crtc_state->pch_pfit.enabled) 7098 ilk_pfit_disable(old_crtc_state); 7099 } 7100 7101 /* 7102 * The register is supposedly single buffered so perhaps 7103 * not 100% correct to do this here. But SKL+ calculates 7104 * this based on the adjusted pixel rate, so pfit changes do 7105 * affect it and thus it must be updated for fastsets. 7106 * HSW/BDW only really need this here for fastboot, after 7107 * that the value should not change without a full modeset. 7108 */ 7109 if (DISPLAY_VER(dev_priv) >= 9 || 7110 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 7111 hsw_set_linetime_wm(new_crtc_state); 7112 7113 if (new_crtc_state->update_m_n) 7114 intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder, 7115 &new_crtc_state->dp_m_n); 7116 7117 if (new_crtc_state->update_lrr) 7118 intel_set_transcoder_timings_lrr(new_crtc_state); 7119 } 7120 7121 static void commit_pipe_pre_planes(struct intel_atomic_state *state, 7122 struct intel_crtc *crtc) 7123 { 7124 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7125 const struct intel_crtc_state *old_crtc_state = 7126 intel_atomic_get_old_crtc_state(state, crtc); 7127 const struct intel_crtc_state *new_crtc_state = 7128 intel_atomic_get_new_crtc_state(state, crtc); 7129 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 7130 7131 /* 7132 * During modesets the pipe configuration was programmed as the 7133 * CRTC was enabled.
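 * Only fastsets and other non-modeset updates therefore need to (re)program the pipe configuration here, which is what the !modeset check below implements.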
7134 */ 7135 if (!modeset && !new_crtc_state->use_dsb) { 7136 if (intel_crtc_needs_color_update(new_crtc_state)) 7137 intel_color_commit_arm(NULL, new_crtc_state); 7138 7139 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 7140 bdw_set_pipe_misc(NULL, new_crtc_state); 7141 7142 if (intel_crtc_needs_fastset(new_crtc_state)) 7143 intel_pipe_fastset(old_crtc_state, new_crtc_state); 7144 } 7145 7146 intel_psr2_program_trans_man_trk_ctl(new_crtc_state); 7147 7148 intel_atomic_update_watermarks(state, crtc); 7149 } 7150 7151 static void commit_pipe_post_planes(struct intel_atomic_state *state, 7152 struct intel_crtc *crtc) 7153 { 7154 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7155 const struct intel_crtc_state *new_crtc_state = 7156 intel_atomic_get_new_crtc_state(state, crtc); 7157 7158 /* 7159 * Disable the scaler(s) after the plane(s) so that we don't 7160 * get a catastrophic underrun even if the two operations 7161 * end up happening in two different frames. 7162 */ 7163 if (DISPLAY_VER(dev_priv) >= 9 && 7164 !intel_crtc_needs_modeset(new_crtc_state)) 7165 skl_detach_scalers(new_crtc_state); 7166 7167 if (intel_crtc_vrr_enabling(state, crtc)) 7168 intel_vrr_enable(new_crtc_state); 7169 } 7170 7171 static void intel_enable_crtc(struct intel_atomic_state *state, 7172 struct intel_crtc *crtc) 7173 { 7174 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7175 const struct intel_crtc_state *new_crtc_state = 7176 intel_atomic_get_new_crtc_state(state, crtc); 7177 struct intel_crtc *pipe_crtc; 7178 7179 if (!intel_crtc_needs_modeset(new_crtc_state)) 7180 return; 7181 7182 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, 7183 intel_crtc_joined_pipe_mask(new_crtc_state)) { 7184 const struct intel_crtc_state *pipe_crtc_state = 7185 intel_atomic_get_new_crtc_state(state, pipe_crtc); 7186 7187 /* VRR will be enabled later, if required */ 7188 intel_crtc_update_active_timings(pipe_crtc_state, false); 7189 } 7190 7191 dev_priv->display.funcs.display->crtc_enable(state, crtc); 7192 7193 /* Vblanks work again, re-enable pipe CRC.
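 * CRC collection was stopped before the pipe was shut down, since we would otherwise race against vblank off.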
*/ 7194 intel_crtc_enable_pipe_crc(crtc); 7195 } 7196 7197 static void intel_pre_update_crtc(struct intel_atomic_state *state, 7198 struct intel_crtc *crtc) 7199 { 7200 struct drm_i915_private *i915 = to_i915(state->base.dev); 7201 const struct intel_crtc_state *old_crtc_state = 7202 intel_atomic_get_old_crtc_state(state, crtc); 7203 struct intel_crtc_state *new_crtc_state = 7204 intel_atomic_get_new_crtc_state(state, crtc); 7205 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 7206 7207 if (old_crtc_state->inherited || 7208 intel_crtc_needs_modeset(new_crtc_state)) { 7209 if (HAS_DPT(i915)) 7210 intel_dpt_configure(crtc); 7211 } 7212 7213 if (!modeset) { 7214 if (new_crtc_state->preload_luts && 7215 intel_crtc_needs_color_update(new_crtc_state)) 7216 intel_color_load_luts(new_crtc_state); 7217 7218 intel_pre_plane_update(state, crtc); 7219 7220 if (intel_crtc_needs_fastset(new_crtc_state)) 7221 intel_encoders_update_pipe(state, crtc); 7222 7223 if (DISPLAY_VER(i915) >= 11 && 7224 intel_crtc_needs_fastset(new_crtc_state)) 7225 icl_set_pipe_chicken(new_crtc_state); 7226 7227 if (vrr_params_changed(old_crtc_state, new_crtc_state) || 7228 cmrr_params_changed(old_crtc_state, new_crtc_state)) 7229 intel_vrr_set_transcoder_timings(new_crtc_state); 7230 } 7231 7232 intel_fbc_update(state, crtc); 7233 7234 drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF)); 7235 7236 if (!modeset && 7237 intel_crtc_needs_color_update(new_crtc_state) && 7238 !new_crtc_state->use_dsb) 7239 intel_color_commit_noarm(NULL, new_crtc_state); 7240 7241 if (!new_crtc_state->use_dsb) 7242 intel_crtc_planes_update_noarm(NULL, state, crtc); 7243 } 7244 7245 static void intel_update_crtc(struct intel_atomic_state *state, 7246 struct intel_crtc *crtc) 7247 { 7248 const struct intel_crtc_state *old_crtc_state = 7249 intel_atomic_get_old_crtc_state(state, crtc); 7250 struct intel_crtc_state *new_crtc_state = 7251 intel_atomic_get_new_crtc_state(state, crtc); 7252 7253 if (new_crtc_state->use_dsb) { 7254 intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->dsb_event); 7255 7256 intel_dsb_commit(new_crtc_state->dsb_commit, false); 7257 } else { 7258 /* Perform vblank evasion around commit operation */ 7259 intel_pipe_update_start(state, crtc); 7260 7261 if (new_crtc_state->dsb_commit) 7262 intel_dsb_commit(new_crtc_state->dsb_commit, false); 7263 7264 commit_pipe_pre_planes(state, crtc); 7265 7266 intel_crtc_planes_update_arm(NULL, state, crtc); 7267 7268 commit_pipe_post_planes(state, crtc); 7269 7270 intel_pipe_update_end(state, crtc); 7271 } 7272 7273 /* 7274 * VRR/Seamless M/N update may need to update frame timings. 7275 * 7276 * FIXME Should be synchronized with the start of vblank somehow... 7277 */ 7278 if (intel_crtc_vrr_enabling(state, crtc) || 7279 new_crtc_state->update_m_n || new_crtc_state->update_lrr) 7280 intel_crtc_update_active_timings(new_crtc_state, 7281 new_crtc_state->vrr.enable); 7282 7283 /* 7284 * We usually enable FIFO underrun interrupts as part of the 7285 * CRTC enable sequence during modesets. But when we inherit a 7286 * valid pipe configuration from the BIOS we need to take care 7287 * of enabling them on the CRTC's first fastset. 
7288 */ 7289 if (intel_crtc_needs_fastset(new_crtc_state) && 7290 old_crtc_state->inherited) 7291 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 7292 } 7293 7294 static void intel_old_crtc_state_disables(struct intel_atomic_state *state, 7295 struct intel_crtc *crtc) 7296 { 7297 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7298 const struct intel_crtc_state *old_crtc_state = 7299 intel_atomic_get_old_crtc_state(state, crtc); 7300 struct intel_crtc *pipe_crtc; 7301 7302 /* 7303 * We need to disable pipe CRC before disabling the pipe, 7304 * or we race against vblank off. 7305 */ 7306 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, 7307 intel_crtc_joined_pipe_mask(old_crtc_state)) 7308 intel_crtc_disable_pipe_crc(pipe_crtc); 7309 7310 dev_priv->display.funcs.display->crtc_disable(state, crtc); 7311 7312 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, 7313 intel_crtc_joined_pipe_mask(old_crtc_state)) { 7314 const struct intel_crtc_state *new_pipe_crtc_state = 7315 intel_atomic_get_new_crtc_state(state, pipe_crtc); 7316 7317 pipe_crtc->active = false; 7318 intel_fbc_disable(pipe_crtc); 7319 7320 if (!new_pipe_crtc_state->hw.active) 7321 intel_initial_watermarks(state, pipe_crtc); 7322 } 7323 } 7324 7325 static void intel_commit_modeset_disables(struct intel_atomic_state *state) 7326 { 7327 struct drm_i915_private *i915 = to_i915(state->base.dev); 7328 const struct intel_crtc_state *new_crtc_state, *old_crtc_state; 7329 struct intel_crtc *crtc; 7330 u8 disable_pipes = 0; 7331 int i; 7332 7333 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7334 new_crtc_state, i) { 7335 if (!intel_crtc_needs_modeset(new_crtc_state)) 7336 continue; 7337 7338 /* 7339 * Needs to be done even for pipes 7340 * that weren't enabled previously. 7341 */ 7342 intel_pre_plane_update(state, crtc); 7343 7344 if (!old_crtc_state->hw.active) 7345 continue; 7346 7347 disable_pipes |= BIT(crtc->pipe); 7348 } 7349 7350 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) { 7351 if ((disable_pipes & BIT(crtc->pipe)) == 0) 7352 continue; 7353 7354 intel_crtc_disable_planes(state, crtc); 7355 7356 drm_vblank_work_flush_all(&crtc->base); 7357 } 7358 7359 /* Only disable port sync and MST slaves */ 7360 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) { 7361 if ((disable_pipes & BIT(crtc->pipe)) == 0) 7362 continue; 7363 7364 if (intel_crtc_is_joiner_secondary(old_crtc_state)) 7365 continue; 7366 7367 /* 7368 * With Transcoder Port Sync, master and slave CRTCs can be assigned 7369 * in any order, so we must make sure the slave CRTCs are disabled 7370 * first and the master CRTC last, since slave vblanks are masked until the master's vblank is enabled.
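 * E.g. with pipe B as a port sync master and pipe C as its slave (an illustrative assignment), the loop below shuts down C, while B is deliberately skipped here and only disabled by the "everything else" loop that follows.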
7371 */ 7372 if (!is_trans_port_sync_slave(old_crtc_state) && 7373 !intel_dp_mst_is_slave_trans(old_crtc_state)) 7374 continue; 7375 7376 intel_old_crtc_state_disables(state, crtc); 7377 7378 disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state); 7379 } 7380 7381 /* Disable everything else left on */ 7382 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) { 7383 if ((disable_pipes & BIT(crtc->pipe)) == 0) 7384 continue; 7385 7386 if (intel_crtc_is_joiner_secondary(old_crtc_state)) 7387 continue; 7388 7389 intel_old_crtc_state_disables(state, crtc); 7390 7391 disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state); 7392 } 7393 7394 drm_WARN_ON(&i915->drm, disable_pipes); 7395 } 7396 7397 static void intel_commit_modeset_enables(struct intel_atomic_state *state) 7398 { 7399 struct intel_crtc_state *new_crtc_state; 7400 struct intel_crtc *crtc; 7401 int i; 7402 7403 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7404 if (!new_crtc_state->hw.active) 7405 continue; 7406 7407 intel_enable_crtc(state, crtc); 7408 intel_pre_update_crtc(state, crtc); 7409 } 7410 7411 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7412 if (!new_crtc_state->hw.active) 7413 continue; 7414 7415 intel_update_crtc(state, crtc); 7416 } 7417 } 7418 7419 static void skl_commit_modeset_enables(struct intel_atomic_state *state) 7420 { 7421 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7422 struct intel_crtc *crtc; 7423 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 7424 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 7425 u8 update_pipes = 0, modeset_pipes = 0; 7426 int i; 7427 7428 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 7429 enum pipe pipe = crtc->pipe; 7430 7431 if (!new_crtc_state->hw.active) 7432 continue; 7433 7434 /* ignore allocations for CRTCs that have been turned off. */ 7435 if (!intel_crtc_needs_modeset(new_crtc_state)) { 7436 entries[pipe] = old_crtc_state->wm.skl.ddb; 7437 update_pipes |= BIT(pipe); 7438 } else { 7439 modeset_pipes |= BIT(pipe); 7440 } 7441 } 7442 7443 /* 7444 * Whenever the number of active pipes changes, we need to make sure we 7445 * update the pipes in the right order so that their ddb allocations 7446 * never overlap with each other between CRTC updates. Otherwise we'll 7447 * cause pipe underruns and other bad stuff. 7448 * 7449 * So first let's enable all pipes that do not need a full modeset as 7450 * those don't have any external dependencies. 7451 */ 7452 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7453 enum pipe pipe = crtc->pipe; 7454 7455 if ((update_pipes & BIT(pipe)) == 0) 7456 continue; 7457 7458 intel_pre_update_crtc(state, crtc); 7459 } 7460 7461 intel_dbuf_mbus_pre_ddb_update(state); 7462 7463 while (update_pipes) { 7464 /* 7465 * Commit in reverse order to make the joiner primary 7466 * send the uapi events after the secondaries are done.
7467 */ 7468 for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state, 7469 new_crtc_state, i) { 7470 enum pipe pipe = crtc->pipe; 7471 7472 if ((update_pipes & BIT(pipe)) == 0) 7473 continue; 7474 7475 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 7476 entries, I915_MAX_PIPES, pipe)) 7477 continue; 7478 7479 entries[pipe] = new_crtc_state->wm.skl.ddb; 7480 update_pipes &= ~BIT(pipe); 7481 7482 intel_update_crtc(state, crtc); 7483 7484 /* 7485 * If this is an already active pipe, its DDB allocation has 7486 * changed, and this isn't the last pipe that needs updating, 7487 * then we need to wait for a vblank to pass for the 7488 * new ddb allocation to take effect. 7489 */ 7490 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, 7491 &old_crtc_state->wm.skl.ddb) && 7492 (update_pipes | modeset_pipes)) 7493 intel_crtc_wait_for_next_vblank(crtc); 7494 } 7495 } 7496 7497 intel_dbuf_mbus_post_ddb_update(state); 7498 7499 update_pipes = modeset_pipes; 7500 7501 /* 7502 * Enable all pipes that need a modeset and do not depend on other 7503 * pipes. 7504 */ 7505 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7506 enum pipe pipe = crtc->pipe; 7507 7508 if ((modeset_pipes & BIT(pipe)) == 0) 7509 continue; 7510 7511 if (intel_crtc_is_joiner_secondary(new_crtc_state)) 7512 continue; 7513 7514 if (intel_dp_mst_is_slave_trans(new_crtc_state) || 7515 is_trans_port_sync_master(new_crtc_state)) 7516 continue; 7517 7518 modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state); 7519 7520 intel_enable_crtc(state, crtc); 7521 } 7522 7523 /* 7524 * Then we enable all remaining pipes that depend on other 7525 * pipes: MST slaves and port sync masters. 7526 */ 7527 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7528 enum pipe pipe = crtc->pipe; 7529 7530 if ((modeset_pipes & BIT(pipe)) == 0) 7531 continue; 7532 7533 if (intel_crtc_is_joiner_secondary(new_crtc_state)) 7534 continue; 7535 7536 modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state); 7537 7538 intel_enable_crtc(state, crtc); 7539 } 7540 7541 /* 7542 * Finally we do the plane updates/etc. for all pipes that got enabled. 7543 */ 7544 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7545 enum pipe pipe = crtc->pipe; 7546 7547 if ((update_pipes & BIT(pipe)) == 0) 7548 continue; 7549 7550 intel_pre_update_crtc(state, crtc); 7551 } 7552 7553 /* 7554 * Commit in reverse order to make the joiner primary 7555 * send the uapi events after the secondaries are done.
7556 */ 7557 for_each_new_intel_crtc_in_state_reverse(state, crtc, new_crtc_state, i) { 7558 enum pipe pipe = crtc->pipe; 7559 7560 if ((update_pipes & BIT(pipe)) == 0) 7561 continue; 7562 7563 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 7564 entries, I915_MAX_PIPES, pipe)); 7565 7566 entries[pipe] = new_crtc_state->wm.skl.ddb; 7567 update_pipes &= ~BIT(pipe); 7568 7569 intel_update_crtc(state, crtc); 7570 } 7571 7572 drm_WARN_ON(&dev_priv->drm, modeset_pipes); 7573 drm_WARN_ON(&dev_priv->drm, update_pipes); 7574 } 7575 7576 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 7577 { 7578 struct drm_i915_private *i915 = to_i915(intel_state->base.dev); 7579 struct drm_plane *plane; 7580 struct drm_plane_state *new_plane_state; 7581 int ret, i; 7582 7583 for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) { 7584 if (new_plane_state->fence) { 7585 ret = dma_fence_wait_timeout(new_plane_state->fence, false, 7586 i915_fence_timeout(i915)); 7587 if (ret <= 0) 7588 break; 7589 7590 dma_fence_put(new_plane_state->fence); 7591 new_plane_state->fence = NULL; 7592 } 7593 } 7594 } 7595 7596 static void intel_atomic_dsb_wait_commit(struct intel_crtc_state *crtc_state) 7597 { 7598 if (crtc_state->dsb_commit) 7599 intel_dsb_wait(crtc_state->dsb_commit); 7600 7601 intel_color_wait_commit(crtc_state); 7602 } 7603 7604 static void intel_atomic_dsb_cleanup(struct intel_crtc_state *crtc_state) 7605 { 7606 if (crtc_state->dsb_commit) { 7607 intel_dsb_cleanup(crtc_state->dsb_commit); 7608 crtc_state->dsb_commit = NULL; 7609 } 7610 7611 intel_color_cleanup_commit(crtc_state); 7612 } 7613 7614 static void intel_atomic_cleanup_work(struct work_struct *work) 7615 { 7616 struct intel_atomic_state *state = 7617 container_of(work, struct intel_atomic_state, cleanup_work); 7618 struct drm_i915_private *i915 = to_i915(state->base.dev); 7619 struct intel_crtc_state *old_crtc_state; 7620 struct intel_crtc *crtc; 7621 int i; 7622 7623 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) 7624 intel_atomic_dsb_cleanup(old_crtc_state); 7625 7626 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base); 7627 drm_atomic_helper_commit_cleanup_done(&state->base); 7628 drm_atomic_state_put(&state->base); 7629 } 7630 7631 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state) 7632 { 7633 struct drm_i915_private *i915 = to_i915(state->base.dev); 7634 struct intel_plane *plane; 7635 struct intel_plane_state *plane_state; 7636 int i; 7637 7638 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 7639 struct drm_framebuffer *fb = plane_state->hw.fb; 7640 int cc_plane; 7641 int ret; 7642 7643 if (!fb) 7644 continue; 7645 7646 cc_plane = intel_fb_rc_ccs_cc_plane(fb); 7647 if (cc_plane < 0) 7648 continue; 7649 7650 /* 7651 * The layout of the fast clear color value expected by HW 7652 * (the DRM ABI requires this value to be located in the fb at 7653 * offset 0 of the cc plane: plane #2 on previous generations, or 7654 * plane #1 for flat CCS): 7655 * - 4 x 4 byte per-channel values 7656 * (in the surface type specific float/int format provided by the fb user) 7657 * - 8 byte native color value used by the display 7658 * (converted/written by the GPU during a fast clear operation using the 7659 * above per-channel values) 7660 * 7661 * The commit's FB prepare hook already ensured that the FB obj is pinned, and 7662 * the caller made sure that the object is synced wrt.
the GPU's write of the related 7663 color clear value to it. 7664 */ 7665 ret = intel_bo_read_from_page(intel_fb_bo(fb), 7666 fb->offsets[cc_plane] + 16, 7667 &plane_state->ccval, 7668 sizeof(plane_state->ccval)); 7669 /* The above could only fail if the FB obj has an unexpected backing store type. */ 7670 drm_WARN_ON(&i915->drm, ret); 7671 } 7672 } 7673 7674 static void intel_atomic_dsb_prepare(struct intel_atomic_state *state, 7675 struct intel_crtc *crtc) 7676 { 7677 intel_color_prepare_commit(state, crtc); 7678 } 7679 7680 static void intel_atomic_dsb_finish(struct intel_atomic_state *state, 7681 struct intel_crtc *crtc) 7682 { 7683 const struct intel_crtc_state *old_crtc_state = 7684 intel_atomic_get_old_crtc_state(state, crtc); 7685 struct intel_crtc_state *new_crtc_state = 7686 intel_atomic_get_new_crtc_state(state, crtc); 7687 7688 if (!new_crtc_state->hw.active) 7689 return; 7690 7691 if (state->base.legacy_cursor_update) 7692 return; 7693 7694 /* FIXME deal with everything */ 7695 new_crtc_state->use_dsb = 7696 new_crtc_state->update_planes && 7697 !new_crtc_state->do_async_flip && 7698 !new_crtc_state->has_psr && 7699 !new_crtc_state->scaler_state.scaler_users && 7700 !old_crtc_state->scaler_state.scaler_users && 7701 !intel_crtc_needs_modeset(new_crtc_state) && 7702 !intel_crtc_needs_fastset(new_crtc_state); 7703 7704 if (!new_crtc_state->use_dsb && !new_crtc_state->dsb_color_vblank) 7705 return; 7706 7707 /* 7708 * Rough estimate: 7709 * ~64 registers per plane * 8 planes = 512 7710 * Double that for pipe stuff and other overhead. 7711 */ 7712 new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 7713 new_crtc_state->use_dsb ? 1024 : 16); 7714 if (!new_crtc_state->dsb_commit) { 7715 new_crtc_state->use_dsb = false; 7716 intel_color_cleanup_commit(new_crtc_state); 7717 return; 7718 } 7719 7720 if (new_crtc_state->use_dsb) { 7721 if (intel_crtc_needs_color_update(new_crtc_state)) 7722 intel_color_commit_noarm(new_crtc_state->dsb_commit, 7723 new_crtc_state); 7724 intel_crtc_planes_update_noarm(new_crtc_state->dsb_commit, 7725 state, crtc); 7726 7727 intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit); 7728 7729 if (intel_crtc_needs_color_update(new_crtc_state)) 7730 intel_color_commit_arm(new_crtc_state->dsb_commit, 7731 new_crtc_state); 7732 bdw_set_pipe_misc(new_crtc_state->dsb_commit, 7733 new_crtc_state); 7734 intel_crtc_planes_update_arm(new_crtc_state->dsb_commit, 7735 state, crtc); 7736 7737 intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state); 7738 7739 if (!new_crtc_state->dsb_color_vblank) { 7740 intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1); 7741 intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit); 7742 intel_dsb_interrupt(new_crtc_state->dsb_commit); 7743 } 7744 } 7745 7746 if (new_crtc_state->dsb_color_vblank) 7747 intel_dsb_chain(state, new_crtc_state->dsb_commit, 7748 new_crtc_state->dsb_color_vblank, true); 7749 7750 intel_dsb_finish(new_crtc_state->dsb_commit); 7751 } 7752 7753 static void intel_atomic_commit_tail(struct intel_atomic_state *state) 7754 { 7755 struct drm_device *dev = state->base.dev; 7756 struct drm_i915_private *dev_priv = to_i915(dev); 7757 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 7758 struct intel_crtc *crtc; 7759 struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {}; 7760 intel_wakeref_t wakeref = NULL; 7761 int i; 7762 7763 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7764 intel_atomic_dsb_prepare(state, crtc); 7765 7766
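/* Wait (with a timeout) for any pending plane dma_fences before touching the hardware. */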
intel_atomic_commit_fence_wait(state); 7767 7768 intel_td_flush(dev_priv); 7769 7770 intel_atomic_prepare_plane_clear_colors(state); 7771 7772 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7773 intel_atomic_dsb_finish(state, crtc); 7774 7775 drm_atomic_helper_wait_for_dependencies(&state->base); 7776 drm_dp_mst_atomic_wait_for_dependencies(&state->base); 7777 intel_atomic_global_state_wait_for_dependencies(state); 7778 7779 /* 7780 * During full modesets we write a lot of registers, wait 7781 * for PLLs, etc. Doing that while DC states are enabled 7782 * is not a good idea. 7783 * 7784 * During fastsets and other updates we also need to 7785 * disable DC states due to the following scenario: 7786 * 1. DC5 exit and PSR exit happen 7787 * 2. Some or all _noarm() registers are written 7788 * 3. Due to some long delay PSR is re-entered 7789 * 4. DC5 entry -> DMC saves the already written new 7790 * _noarm() registers and the old not yet written 7791 * _arm() registers 7792 * 5. DC5 exit -> DMC restores a mixture of old and 7793 * new register values and arms the update 7794 * 6. PSR exit -> hardware latches a mixture of old and 7795 * new register values -> corrupted frame, or worse 7796 * 7. New _arm() registers are finally written 7797 * 8. Hardware finally latches a complete set of new 7798 * register values, and subsequent frames will be OK again 7799 * 7800 * Also note that due to the pipe CSC hardware issues on 7801 * SKL/GLK DC states must remain off until the pipe CSC 7802 * state readout has happened. Otherwise we risk corrupting 7803 * the CSC latched register values with the readout (see 7804 * skl_read_csc() and skl_color_commit_noarm()). 7805 */ 7806 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF); 7807 7808 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7809 new_crtc_state, i) { 7810 if (intel_crtc_needs_modeset(new_crtc_state) || 7811 intel_crtc_needs_fastset(new_crtc_state)) 7812 intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]); 7813 } 7814 7815 intel_commit_modeset_disables(state); 7816 7817 intel_dp_tunnel_atomic_alloc_bw(state); 7818 7819 /* FIXME: Eventually get rid of our crtc->config pointer */ 7820 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7821 crtc->config = new_crtc_state; 7822 7823 /* 7824 * In XE_LPD+ Pmdemand combines many parameters such as the voltage index, 7825 * PLLs, cdclk frequency, QGV point selection parameters, etc. The voltage 7826 * index and cdclk/ddiclk frequencies are supposed to be configured before 7827 * the cdclk config is set. 7828 */ 7829 intel_pmdemand_pre_plane_update(state); 7830 7831 if (state->modeset) { 7832 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); 7833 7834 intel_set_cdclk_pre_plane_update(state); 7835 7836 intel_modeset_verify_disabled(state); 7837 } 7838 7839 intel_sagv_pre_plane_update(state); 7840 7841 /* Complete the events for pipes that have now been disabled */ 7842 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7843 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 7844 7845 /* A pipe being fully disabled won't signal a flip done, so complete its pending uapi event here.
*/ 7846 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) { 7847 spin_lock_irq(&dev->event_lock); 7848 drm_crtc_send_vblank_event(&crtc->base, 7849 new_crtc_state->uapi.event); 7850 spin_unlock_irq(&dev->event_lock); 7851 7852 new_crtc_state->uapi.event = NULL; 7853 } 7854 } 7855 7856 intel_encoders_update_prepare(state); 7857 7858 intel_dbuf_pre_plane_update(state); 7859 7860 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7861 if (new_crtc_state->do_async_flip) 7862 intel_crtc_enable_flip_done(state, crtc); 7863 } 7864 7865 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 7866 dev_priv->display.funcs.display->commit_modeset_enables(state); 7867 7868 intel_program_dpkgc_latency(state); 7869 7870 if (state->modeset) 7871 intel_set_cdclk_post_plane_update(state); 7872 7873 intel_wait_for_vblank_workers(state); 7874 7875 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here 7876 * already, but still need the state for the delayed optimization. To 7877 * fix this: 7878 * - wrap the optimization/post_plane_update stuff into a per-crtc work. 7879 * - schedule that vblank worker _before_ calling hw_done 7880 * - at the start of commit_tail, cancel it _synchronously_ 7881 * - switch over to the vblank wait helper in the core after that since 7882 * we don't need our special handling any more. 7883 */ 7884 drm_atomic_helper_wait_for_flip_done(dev, &state->base); 7885 7886 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7887 if (new_crtc_state->do_async_flip) 7888 intel_crtc_disable_flip_done(state, crtc); 7889 7890 intel_atomic_dsb_wait_commit(new_crtc_state); 7891 } 7892 7893 /* 7894 * Now that the vblank has passed, we can go ahead and program the 7895 * optimal watermarks on platforms that need two-step watermark 7896 * programming. 7897 * 7898 * TODO: Move this (and other cleanup) to an async worker eventually. 7899 */ 7900 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7901 new_crtc_state, i) { 7902 /* 7903 * Gen2 reports pipe underruns whenever all planes are disabled. 7904 * So re-enable underrun reporting after some planes get enabled. 7905 * 7906 * We do this before .optimize_watermarks() so that we have a 7907 * chance of catching underruns with the intermediate watermarks 7908 * vs. the new plane configuration. 7909 */ 7910 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state)) 7911 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 7912 7913 intel_optimize_watermarks(state, crtc); 7914 } 7915 7916 intel_dbuf_post_plane_update(state); 7917 7918 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 7919 intel_post_plane_update(state, crtc); 7920 7921 intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]); 7922 7923 intel_modeset_verify_crtc(state, crtc); 7924 7925 intel_post_plane_update_after_readout(state, crtc); 7926 7927 /* 7928 * DSB cleanup is done in cleanup_work, aligning with framebuffer 7929 * cleanup. So copy and reset the dsb structure to sync with 7930 * commit_done, and do the dsb cleanup later in cleanup_work.
7931 * 7932 * FIXME get rid of this funny new->old swapping 7933 */ 7934 old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank); 7935 old_crtc_state->dsb_commit = fetch_and_zero(&new_crtc_state->dsb_commit); 7936 } 7937 7938 /* Underruns don't always raise interrupts, so check manually */ 7939 intel_check_cpu_fifo_underruns(dev_priv); 7940 intel_check_pch_fifo_underruns(dev_priv); 7941 7942 if (state->modeset) 7943 intel_verify_planes(state); 7944 7945 intel_sagv_post_plane_update(state); 7946 intel_pmdemand_post_plane_update(state); 7947 7948 drm_atomic_helper_commit_hw_done(&state->base); 7949 intel_atomic_global_state_commit_done(state); 7950 7951 if (state->modeset) { 7952 /* As one of the primary mmio accessors, KMS has a high 7953 * likelihood of triggering bugs in unclaimed access. After we 7954 * finish modesetting, see if an error has been flagged, and if 7955 * so enable debugging for the next modeset - and hope we catch 7956 * the culprit. 7957 */ 7958 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); 7959 } 7960 /* 7961 * Delay re-enabling DC states by 17 ms to avoid the off->on->off 7962 * toggling overhead at and above 60 FPS. 7963 */ 7964 intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17); 7965 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 7966 7967 /* 7968 * Defer the cleanup of the old state to a separate worker so as not to 7969 * impede the current task (userspace, for blocking modesets) that 7970 * is executed inline. For out-of-line asynchronous modesets/flips, 7971 * deferring to a new worker seems overkill, but we would place a 7972 * schedule point (cond_resched()) here anyway to keep latencies 7973 * down. 7974 */ 7975 INIT_WORK(&state->cleanup_work, intel_atomic_cleanup_work); 7976 queue_work(dev_priv->display.wq.cleanup, &state->cleanup_work); 7977 } 7978 7979 static void intel_atomic_commit_work(struct work_struct *work) 7980 { 7981 struct intel_atomic_state *state = 7982 container_of(work, struct intel_atomic_state, base.commit_work); 7983 7984 intel_atomic_commit_tail(state); 7985 } 7986 7987 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 7988 { 7989 struct intel_plane_state *old_plane_state, *new_plane_state; 7990 struct intel_plane *plane; 7991 int i; 7992 7993 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 7994 new_plane_state, i) 7995 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 7996 to_intel_frontbuffer(new_plane_state->hw.fb), 7997 plane->frontbuffer_bit); 7998 } 7999 8000 static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock) 8001 { 8002 int ret; 8003 8004 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 8005 if (ret) 8006 return ret; 8007 8008 ret = intel_atomic_global_state_setup_commit(state); 8009 if (ret) 8010 return ret; 8011 8012 return 0; 8013 } 8014 8015 static int intel_atomic_swap_state(struct intel_atomic_state *state) 8016 { 8017 int ret; 8018 8019 ret = drm_atomic_helper_swap_state(&state->base, true); 8020 if (ret) 8021 return ret; 8022 8023 intel_atomic_swap_global_state(state); 8024 8025 intel_shared_dpll_swap_state(state); 8026 8027 intel_atomic_track_fbs(state); 8028 8029 return 0; 8030 } 8031 8032 int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, 8033 bool nonblock) 8034 { 8035 struct intel_atomic_state *state = to_intel_atomic_state(_state); 8036 struct drm_i915_private *dev_priv = to_i915(dev); 8037 int ret = 0;
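/* Hold a runtime PM reference across the whole commit; it is released at the end of the commit tail, or on the failure paths below. */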
8038 8039 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 8040 8041 /* 8042 * The intel_legacy_cursor_update() fast path takes care 8043 * of avoiding the vblank waits for simple cursor 8044 * movement and flips. For cursor on/off and size changes, 8045 * we want to perform the vblank waits so that watermark 8046 * updates happen during the correct frames. Gen9+ have 8047 * double buffered watermarks and so shouldn't need this. 8048 * 8049 * Unset state->legacy_cursor_update before the call to 8050 * drm_atomic_helper_setup_commit() because otherwise 8051 * drm_atomic_helper_wait_for_flip_done() is a noop and 8052 * we get FIFO underruns because we didn't wait 8053 * for vblank. 8054 * 8055 * FIXME doing watermarks and fb cleanup from a vblank worker 8056 * (assuming we had any) would solve these problems. 8057 */ 8058 if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) { 8059 struct intel_crtc_state *new_crtc_state; 8060 struct intel_crtc *crtc; 8061 int i; 8062 8063 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 8064 if (new_crtc_state->wm.need_postvbl_update || 8065 new_crtc_state->update_wm_post) 8066 state->base.legacy_cursor_update = false; 8067 } 8068 8069 ret = intel_atomic_prepare_commit(state); 8070 if (ret) { 8071 drm_dbg_atomic(&dev_priv->drm, 8072 "Preparing state failed with %i\n", ret); 8073 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 8074 return ret; 8075 } 8076 8077 ret = intel_atomic_setup_commit(state, nonblock); 8078 if (!ret) 8079 ret = intel_atomic_swap_state(state); 8080 8081 if (ret) { 8082 drm_atomic_helper_unprepare_planes(dev, &state->base); 8083 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 8084 return ret; 8085 } 8086 8087 drm_atomic_state_get(&state->base); 8088 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 8089 8090 if (nonblock && state->modeset) { 8091 queue_work(dev_priv->display.wq.modeset, &state->base.commit_work); 8092 } else if (nonblock) { 8093 queue_work(dev_priv->display.wq.flip, &state->base.commit_work); 8094 } else { 8095 if (state->modeset) 8096 flush_workqueue(dev_priv->display.wq.modeset); 8097 intel_atomic_commit_tail(state); 8098 } 8099 8100 return 0; 8101 } 8102 8103 /** 8104 * intel_plane_destroy - destroy a plane 8105 * @plane: plane to destroy 8106 * 8107 * Common destruction function for all types of planes (primary, cursor, 8108 * sprite). 
8109 */ 8110 void intel_plane_destroy(struct drm_plane *plane) 8111 { 8112 drm_plane_cleanup(plane); 8113 kfree(to_intel_plane(plane)); 8114 } 8115 8116 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) 8117 { 8118 struct drm_device *dev = encoder->base.dev; 8119 struct intel_encoder *source_encoder; 8120 u32 possible_clones = 0; 8121 8122 for_each_intel_encoder(dev, source_encoder) { 8123 if (encoders_cloneable(encoder, source_encoder)) 8124 possible_clones |= drm_encoder_mask(&source_encoder->base); 8125 } 8126 8127 return possible_clones; 8128 } 8129 8130 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) 8131 { 8132 struct drm_device *dev = encoder->base.dev; 8133 struct intel_crtc *crtc; 8134 u32 possible_crtcs = 0; 8135 8136 for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask) 8137 possible_crtcs |= drm_crtc_mask(&crtc->base); 8138 8139 return possible_crtcs; 8140 } 8141 8142 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) 8143 { 8144 if (!IS_MOBILE(dev_priv)) 8145 return false; 8146 8147 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0) 8148 return false; 8149 8150 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE)) 8151 return false; 8152 8153 return true; 8154 } 8155 8156 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) 8157 { 8158 if (DISPLAY_VER(dev_priv) >= 9) 8159 return false; 8160 8161 if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv)) 8162 return false; 8163 8164 if (HAS_PCH_LPT_H(dev_priv) && 8165 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 8166 return false; 8167 8168 /* DDI E can't be used if DDI A requires 4 lanes */ 8169 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 8170 return false; 8171 8172 if (!dev_priv->display.vbt.int_crt_support) 8173 return false; 8174 8175 return true; 8176 } 8177 8178 bool assert_port_valid(struct drm_i915_private *i915, enum port port) 8179 { 8180 return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)), 8181 "Platform does not support port %c\n", port_name(port)); 8182 } 8183 8184 void intel_setup_outputs(struct drm_i915_private *dev_priv) 8185 { 8186 struct intel_display *display = &dev_priv->display; 8187 struct intel_encoder *encoder; 8188 bool dpd_is_edp = false; 8189 8190 intel_pps_unlock_regs_wa(display); 8191 8192 if (!HAS_DISPLAY(dev_priv)) 8193 return; 8194 8195 if (HAS_DDI(dev_priv)) { 8196 if (intel_ddi_crt_present(dev_priv)) 8197 intel_crt_init(display); 8198 8199 intel_bios_for_each_encoder(display, intel_ddi_init); 8200 8201 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 8202 vlv_dsi_init(dev_priv); 8203 } else if (HAS_PCH_SPLIT(dev_priv)) { 8204 int found; 8205 8206 /* 8207 * intel_edp_init_connector() depends on this completing first, 8208 * to prevent the registration of both eDP and LVDS and the 8209 * incorrect sharing of the PPS. 
8210 */ 8211 intel_lvds_init(dev_priv); 8212 intel_crt_init(display); 8213 8214 dpd_is_edp = intel_dp_is_port_edp(display, PORT_D); 8215 8216 if (ilk_has_edp_a(dev_priv)) 8217 g4x_dp_init(dev_priv, DP_A, PORT_A); 8218 8219 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) { 8220 /* PCH SDVOB multiplex with HDMIB */ 8221 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 8222 if (!found) 8223 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 8224 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED)) 8225 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B); 8226 } 8227 8228 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED) 8229 g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 8230 8231 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED) 8232 g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 8233 8234 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED) 8235 g4x_dp_init(dev_priv, PCH_DP_C, PORT_C); 8236 8237 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED) 8238 g4x_dp_init(dev_priv, PCH_DP_D, PORT_D); 8239 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 8240 bool has_edp, has_port; 8241 8242 if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support) 8243 intel_crt_init(display); 8244 8245 /* 8246 * The DP_DETECTED bit is the latched state of the DDC 8247 * SDA pin at boot. However since eDP doesn't require DDC 8248 * (no way to plug in a DP->HDMI dongle) the DDC pins for 8249 * eDP ports may have been muxed to an alternate function. 8250 * Thus we can't rely on the DP_DETECTED bit alone to detect 8251 * eDP ports. Consult the VBT as well as DP_DETECTED to 8252 * detect eDP ports. 8253 * 8254 * Sadly the straps seem to be missing sometimes even for HDMI 8255 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap 8256 * and VBT for the presence of the port. Additionally we can't 8257 * trust the port type the VBT declares as we've seen at least 8258 * HDMI ports that the VBT claim are DP or eDP. 
8259 */ 8260 has_edp = intel_dp_is_port_edp(display, PORT_B); 8261 has_port = intel_bios_is_port_present(display, PORT_B); 8262 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port) 8263 has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B); 8264 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 8265 g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 8266 8267 has_edp = intel_dp_is_port_edp(display, PORT_C); 8268 has_port = intel_bios_is_port_present(display, PORT_C); 8269 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port) 8270 has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C); 8271 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 8272 g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 8273 8274 if (IS_CHERRYVIEW(dev_priv)) { 8275 /* 8276 * eDP not supported on port D, 8277 * so no need to worry about it 8278 */ 8279 has_port = intel_bios_is_port_present(display, PORT_D); 8280 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port) 8281 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D); 8282 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port) 8283 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 8284 } 8285 8286 vlv_dsi_init(dev_priv); 8287 } else if (IS_PINEVIEW(dev_priv)) { 8288 intel_lvds_init(dev_priv); 8289 intel_crt_init(display); 8290 } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) { 8291 bool found = false; 8292 8293 if (IS_MOBILE(dev_priv)) 8294 intel_lvds_init(dev_priv); 8295 8296 intel_crt_init(display); 8297 8298 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 8299 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n"); 8300 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 8301 if (!found && IS_G4X(dev_priv)) { 8302 drm_dbg_kms(&dev_priv->drm, 8303 "probing HDMI on SDVOB\n"); 8304 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 8305 } 8306 8307 if (!found && IS_G4X(dev_priv)) 8308 g4x_dp_init(dev_priv, DP_B, PORT_B); 8309 } 8310 8311 /* Before G4X SDVOC doesn't have its own detect register */ 8312 8313 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 8314 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n"); 8315 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 8316 } 8317 8318 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) { 8319 8320 if (IS_G4X(dev_priv)) { 8321 drm_dbg_kms(&dev_priv->drm, 8322 "probing HDMI on SDVOC\n"); 8323 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 8324 } 8325 if (IS_G4X(dev_priv)) 8326 g4x_dp_init(dev_priv, DP_C, PORT_C); 8327 } 8328 8329 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED)) 8330 g4x_dp_init(dev_priv, DP_D, PORT_D); 8331 8332 if (SUPPORTS_TV(dev_priv)) 8333 intel_tv_init(display); 8334 } else if (DISPLAY_VER(dev_priv) == 2) { 8335 if (IS_I85X(dev_priv)) 8336 intel_lvds_init(dev_priv); 8337 8338 intel_crt_init(display); 8339 intel_dvo_init(dev_priv); 8340 } 8341 8342 for_each_intel_encoder(&dev_priv->drm, encoder) { 8343 encoder->base.possible_crtcs = 8344 intel_encoder_possible_crtcs(encoder); 8345 encoder->base.possible_clones = 8346 intel_encoder_possible_clones(encoder); 8347 } 8348 8349 intel_init_pch_refclk(dev_priv); 8350 8351 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 8352 } 8353 8354 static int max_dotclock(struct drm_i915_private *i915) 8355 { 8356 struct intel_display *display = &i915->display; 8357 int max_dotclock = display->cdclk.max_dotclk_freq; 8358 8359 if (HAS_ULTRAJOINER(display)) 8360 max_dotclock *= 4; 8361 else if (HAS_UNCOMPRESSED_JOINER(display) || 
HAS_BIGJOINER(display)) 8362 max_dotclock *= 2; 8363 8364 return max_dotclock; 8365 } 8366 8367 enum drm_mode_status intel_mode_valid(struct drm_device *dev, 8368 const struct drm_display_mode *mode) 8369 { 8370 struct drm_i915_private *dev_priv = to_i915(dev); 8371 int hdisplay_max, htotal_max; 8372 int vdisplay_max, vtotal_max; 8373 8374 /* 8375 * Can't reject DBLSCAN here because Xorg ddxen can add piles 8376 * of DBLSCAN modes to the output's mode list when they detect 8377 * the scaling mode property on the connector. And they don't 8378 * ask the kernel to validate those modes in any way until 8379 * modeset time at which point the client gets a protocol error. 8380 * So in order to not upset those clients we silently ignore the 8381 * DBLSCAN flag on such connectors. For other connectors we will 8382 * reject modes with the DBLSCAN flag in encoder->compute_config(). 8383 * And we always reject DBLSCAN modes in connector->mode_valid() 8384 * as we never want such modes on the connector's mode list. 8385 */ 8386 8387 if (mode->vscan > 1) 8388 return MODE_NO_VSCAN; 8389 8390 if (mode->flags & DRM_MODE_FLAG_HSKEW) 8391 return MODE_H_ILLEGAL; 8392 8393 if (mode->flags & (DRM_MODE_FLAG_CSYNC | 8394 DRM_MODE_FLAG_NCSYNC | 8395 DRM_MODE_FLAG_PCSYNC)) 8396 return MODE_HSYNC; 8397 8398 if (mode->flags & (DRM_MODE_FLAG_BCAST | 8399 DRM_MODE_FLAG_PIXMUX | 8400 DRM_MODE_FLAG_CLKDIV2)) 8401 return MODE_BAD; 8402 8403 /* 8404 * Reject clearly excessive dotclocks early to 8405 * avoid having to worry about huge integers later. 8406 */ 8407 if (mode->clock > max_dotclock(dev_priv)) 8408 return MODE_CLOCK_HIGH; 8409 8410 /* Transcoder timing limits */ 8411 if (DISPLAY_VER(dev_priv) >= 11) { 8412 hdisplay_max = 16384; 8413 vdisplay_max = 8192; 8414 htotal_max = 16384; 8415 vtotal_max = 8192; 8416 } else if (DISPLAY_VER(dev_priv) >= 9 || 8417 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 8418 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ 8419 vdisplay_max = 4096; 8420 htotal_max = 8192; 8421 vtotal_max = 8192; 8422 } else if (DISPLAY_VER(dev_priv) >= 3) { 8423 hdisplay_max = 4096; 8424 vdisplay_max = 4096; 8425 htotal_max = 8192; 8426 vtotal_max = 8192; 8427 } else { 8428 hdisplay_max = 2048; 8429 vdisplay_max = 2048; 8430 htotal_max = 4096; 8431 vtotal_max = 4096; 8432 } 8433 8434 if (mode->hdisplay > hdisplay_max || 8435 mode->hsync_start > htotal_max || 8436 mode->hsync_end > htotal_max || 8437 mode->htotal > htotal_max) 8438 return MODE_H_ILLEGAL; 8439 8440 if (mode->vdisplay > vdisplay_max || 8441 mode->vsync_start > vtotal_max || 8442 mode->vsync_end > vtotal_max || 8443 mode->vtotal > vtotal_max) 8444 return MODE_V_ILLEGAL; 8445 8446 return MODE_OK; 8447 } 8448 8449 enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv, 8450 const struct drm_display_mode *mode) 8451 { 8452 /* 8453 * Additional transcoder timing limits, 8454 * excluding BXT/GLK DSI transcoders. 8455 */ 8456 if (DISPLAY_VER(dev_priv) >= 5) { 8457 if (mode->hdisplay < 64 || 8458 mode->htotal - mode->hdisplay < 32) 8459 return MODE_H_ILLEGAL; 8460 8461 if (mode->vtotal - mode->vdisplay < 5) 8462 return MODE_V_ILLEGAL; 8463 } else { 8464 if (mode->htotal - mode->hdisplay < 32) 8465 return MODE_H_ILLEGAL; 8466 8467 if (mode->vtotal - mode->vdisplay < 3) 8468 return MODE_V_ILLEGAL; 8469 } 8470 8471 /* 8472 * Cantiga+ cannot handle modes with a hsync front porch of 0. 8473 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 
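 * (mode->hsync_start == mode->hdisplay means the horizontal front porch would be zero, which is what the check below rejects)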
8474 */ 8475 if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) && 8476 mode->hsync_start == mode->hdisplay) 8477 return MODE_H_ILLEGAL; 8478 8479 return MODE_OK; 8480 } 8481 8482 enum drm_mode_status 8483 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, 8484 const struct drm_display_mode *mode, 8485 int num_joined_pipes) 8486 { 8487 int plane_width_max, plane_height_max; 8488 8489 /* 8490 * intel_mode_valid() should be 8491 * sufficient on older platforms. 8492 */ 8493 if (DISPLAY_VER(dev_priv) < 9) 8494 return MODE_OK; 8495 8496 /* 8497 * Most people will probably want a fullscreen 8498 * plane so let's not advertise modes that are 8499 * too big for that. 8500 */ 8501 if (DISPLAY_VER(dev_priv) >= 30) { 8502 plane_width_max = 6144 * num_joined_pipes; 8503 plane_height_max = 4800; 8504 } else if (DISPLAY_VER(dev_priv) >= 11) { 8505 plane_width_max = 5120 * num_joined_pipes; 8506 plane_height_max = 4320; 8507 } else { 8508 plane_width_max = 5120; 8509 plane_height_max = 4096; 8510 } 8511 8512 if (mode->hdisplay > plane_width_max) 8513 return MODE_H_ILLEGAL; 8514 8515 if (mode->vdisplay > plane_height_max) 8516 return MODE_V_ILLEGAL; 8517 8518 return MODE_OK; 8519 } 8520 8521 static const struct intel_display_funcs skl_display_funcs = { 8522 .get_pipe_config = hsw_get_pipe_config, 8523 .crtc_enable = hsw_crtc_enable, 8524 .crtc_disable = hsw_crtc_disable, 8525 .commit_modeset_enables = skl_commit_modeset_enables, 8526 .get_initial_plane_config = skl_get_initial_plane_config, 8527 .fixup_initial_plane_config = skl_fixup_initial_plane_config, 8528 }; 8529 8530 static const struct intel_display_funcs ddi_display_funcs = { 8531 .get_pipe_config = hsw_get_pipe_config, 8532 .crtc_enable = hsw_crtc_enable, 8533 .crtc_disable = hsw_crtc_disable, 8534 .commit_modeset_enables = intel_commit_modeset_enables, 8535 .get_initial_plane_config = i9xx_get_initial_plane_config, 8536 .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 8537 }; 8538 8539 static const struct intel_display_funcs pch_split_display_funcs = { 8540 .get_pipe_config = ilk_get_pipe_config, 8541 .crtc_enable = ilk_crtc_enable, 8542 .crtc_disable = ilk_crtc_disable, 8543 .commit_modeset_enables = intel_commit_modeset_enables, 8544 .get_initial_plane_config = i9xx_get_initial_plane_config, 8545 .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 8546 }; 8547 8548 static const struct intel_display_funcs vlv_display_funcs = { 8549 .get_pipe_config = i9xx_get_pipe_config, 8550 .crtc_enable = valleyview_crtc_enable, 8551 .crtc_disable = i9xx_crtc_disable, 8552 .commit_modeset_enables = intel_commit_modeset_enables, 8553 .get_initial_plane_config = i9xx_get_initial_plane_config, 8554 .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 8555 }; 8556 8557 static const struct intel_display_funcs i9xx_display_funcs = { 8558 .get_pipe_config = i9xx_get_pipe_config, 8559 .crtc_enable = i9xx_crtc_enable, 8560 .crtc_disable = i9xx_crtc_disable, 8561 .commit_modeset_enables = intel_commit_modeset_enables, 8562 .get_initial_plane_config = i9xx_get_initial_plane_config, 8563 .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 8564 }; 8565 8566 /** 8567 * intel_init_display_hooks - initialize the display modesetting hooks 8568 * @dev_priv: device private 8569 */ 8570 void intel_init_display_hooks(struct drm_i915_private *dev_priv) 8571 { 8572 if (DISPLAY_VER(dev_priv) >= 9) { 8573 dev_priv->display.funcs.display = &skl_display_funcs; 8574 } else if (HAS_DDI(dev_priv)) { 8575
dev_priv->display.funcs.display = &ddi_display_funcs; 8576 } else if (HAS_PCH_SPLIT(dev_priv)) { 8577 dev_priv->display.funcs.display = &pch_split_display_funcs; 8578 } else if (IS_CHERRYVIEW(dev_priv) || 8579 IS_VALLEYVIEW(dev_priv)) { 8580 dev_priv->display.funcs.display = &vlv_display_funcs; 8581 } else { 8582 dev_priv->display.funcs.display = &i9xx_display_funcs; 8583 } 8584 } 8585 8586 int intel_initial_commit(struct drm_device *dev) 8587 { 8588 struct drm_atomic_state *state = NULL; 8589 struct drm_modeset_acquire_ctx ctx; 8590 struct intel_crtc *crtc; 8591 int ret = 0; 8592 8593 state = drm_atomic_state_alloc(dev); 8594 if (!state) 8595 return -ENOMEM; 8596 8597 drm_modeset_acquire_init(&ctx, 0); 8598 8599 state->acquire_ctx = &ctx; 8600 to_intel_atomic_state(state)->internal = true; 8601 8602 retry: 8603 for_each_intel_crtc(dev, crtc) { 8604 struct intel_crtc_state *crtc_state = 8605 intel_atomic_get_crtc_state(state, crtc); 8606 8607 if (IS_ERR(crtc_state)) { 8608 ret = PTR_ERR(crtc_state); 8609 goto out; 8610 } 8611 8612 if (crtc_state->hw.active) { 8613 struct intel_encoder *encoder; 8614 8615 ret = drm_atomic_add_affected_planes(state, &crtc->base); 8616 if (ret) 8617 goto out; 8618 8619 /* 8620 * FIXME hack to force a LUT update to avoid the 8621 * plane update forcing the pipe gamma on without 8622 * having a proper LUT loaded. Remove once we 8623 * have readout for pipe gamma enable. 8624 */ 8625 crtc_state->uapi.color_mgmt_changed = true; 8626 8627 for_each_intel_encoder_mask(dev, encoder, 8628 crtc_state->uapi.encoder_mask) { 8629 if (encoder->initial_fastset_check && 8630 !encoder->initial_fastset_check(encoder, crtc_state)) { 8631 ret = drm_atomic_add_affected_connectors(state, 8632 &crtc->base); 8633 if (ret) 8634 goto out; 8635 } 8636 } 8637 } 8638 } 8639 8640 ret = drm_atomic_commit(state); 8641 8642 out: 8643 if (ret == -EDEADLK) { 8644 drm_atomic_state_clear(state); 8645 drm_modeset_backoff(&ctx); 8646 goto retry; 8647 } 8648 8649 drm_atomic_state_put(state); 8650 8651 drm_modeset_drop_locks(&ctx); 8652 drm_modeset_acquire_fini(&ctx); 8653 8654 return ret; 8655 } 8656 8657 void i830_enable_pipe(struct intel_display *display, enum pipe pipe) 8658 { 8659 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 8660 enum transcoder cpu_transcoder = (enum transcoder)pipe; 8661 /* 640x480@60Hz, ~25175 kHz */ 8662 struct dpll clock = { 8663 .m1 = 18, 8664 .m2 = 7, 8665 .p1 = 13, 8666 .p2 = 4, 8667 .n = 2, 8668 }; 8669 u32 dpll, fp; 8670 int i; 8671 8672 drm_WARN_ON(display->drm, 8673 i9xx_calc_dpll_params(48000, &clock) != 25154); 8674 8675 drm_dbg_kms(display->drm, 8676 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n", 8677 pipe_name(pipe), clock.vco, clock.dot); 8678 8679 fp = i9xx_dpll_compute_fp(&clock); 8680 dpll = DPLL_DVO_2X_MODE | 8681 DPLL_VGA_MODE_DIS | 8682 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | 8683 PLL_P2_DIVIDE_BY_4 | 8684 PLL_REF_INPUT_DREFCLK | 8685 DPLL_VCO_ENABLE; 8686 8687 intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder), 8688 HACTIVE(640 - 1) | HTOTAL(800 - 1)); 8689 intel_de_write(display, TRANS_HBLANK(display, cpu_transcoder), 8690 HBLANK_START(640 - 1) | HBLANK_END(800 - 1)); 8691 intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder), 8692 HSYNC_START(656 - 1) | HSYNC_END(752 - 1)); 8693 intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder), 8694 VACTIVE(480 - 1) | VTOTAL(525 - 1)); 8695 intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder), 8696 VBLANK_START(480 - 1) | VBLANK_END(525 - 
1)); 8697 intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder), 8698 VSYNC_START(490 - 1) | VSYNC_END(492 - 1)); 8699 intel_de_write(display, PIPESRC(display, pipe), 8700 PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1)); 8701 8702 intel_de_write(display, FP0(pipe), fp); 8703 intel_de_write(display, FP1(pipe), fp); 8704 8705 /* 8706 * Apparently we need to have VGA mode enabled prior to changing 8707 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 8708 * dividers, even though the register value does change. 8709 */ 8710 intel_de_write(display, DPLL(display, pipe), 8711 dpll & ~DPLL_VGA_MODE_DIS); 8712 intel_de_write(display, DPLL(display, pipe), dpll); 8713 8714 /* Wait for the clocks to stabilize. */ 8715 intel_de_posting_read(display, DPLL(display, pipe)); 8716 udelay(150); 8717 8718 /* The pixel multiplier can only be updated once the 8719 * DPLL is enabled and the clocks are stable. 8720 * 8721 * So write it again. 8722 */ 8723 intel_de_write(display, DPLL(display, pipe), dpll); 8724 8725 /* We do this three times for luck */ 8726 for (i = 0; i < 3 ; i++) { 8727 intel_de_write(display, DPLL(display, pipe), dpll); 8728 intel_de_posting_read(display, DPLL(display, pipe)); 8729 udelay(150); /* wait for warmup */ 8730 } 8731 8732 intel_de_write(display, TRANSCONF(display, pipe), TRANSCONF_ENABLE); 8733 intel_de_posting_read(display, TRANSCONF(display, pipe)); 8734 8735 intel_wait_for_pipe_scanline_moving(crtc); 8736 } 8737 8738 void i830_disable_pipe(struct intel_display *display, enum pipe pipe) 8739 { 8740 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 8741 8742 drm_dbg_kms(display->drm, "disabling pipe %c due to force quirk\n", 8743 pipe_name(pipe)); 8744 8745 drm_WARN_ON(display->drm, 8746 intel_de_read(display, DSPCNTR(display, PLANE_A)) & DISP_ENABLE); 8747 drm_WARN_ON(display->drm, 8748 intel_de_read(display, DSPCNTR(display, PLANE_B)) & DISP_ENABLE); 8749 drm_WARN_ON(display->drm, 8750 intel_de_read(display, DSPCNTR(display, PLANE_C)) & DISP_ENABLE); 8751 drm_WARN_ON(display->drm, 8752 intel_de_read(display, CURCNTR(display, PIPE_A)) & MCURSOR_MODE_MASK); 8753 drm_WARN_ON(display->drm, 8754 intel_de_read(display, CURCNTR(display, PIPE_B)) & MCURSOR_MODE_MASK); 8755 8756 intel_de_write(display, TRANSCONF(display, pipe), 0); 8757 intel_de_posting_read(display, TRANSCONF(display, pipe)); 8758 8759 intel_wait_for_pipe_scanline_stopped(crtc); 8760 8761 intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS); 8762 intel_de_posting_read(display, DPLL(display, pipe)); 8763 } 8764 8765 void intel_hpd_poll_fini(struct drm_i915_private *i915) 8766 { 8767 struct intel_connector *connector; 8768 struct drm_connector_list_iter conn_iter; 8769 8770 /* Kill all the work that may have been queued by hpd. */ 8771 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 8772 for_each_intel_connector_iter(connector, &conn_iter) { 8773 if (connector->modeset_retry_work.func && 8774 cancel_work_sync(&connector->modeset_retry_work)) 8775 drm_connector_put(&connector->base); 8776 if (connector->hdcp.shim) { 8777 cancel_delayed_work_sync(&connector->hdcp.check_work); 8778 cancel_work_sync(&connector->hdcp.prop_work); 8779 } 8780 } 8781 drm_connector_list_iter_end(&conn_iter); 8782 } 8783 8784 bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915) 8785 { 8786 return IS_DISPLAY_VER(i915, 6, 11) && i915_vtd_active(i915); 8787 } 8788