1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 */ 26 27 #include <linux/dma-resv.h> 28 #include <linux/i2c.h> 29 #include <linux/input.h> 30 #include <linux/kernel.h> 31 #include <linux/module.h> 32 #include <linux/slab.h> 33 #include <linux/string_helpers.h> 34 35 #include <drm/display/drm_dp_helper.h> 36 #include <drm/display/drm_dp_tunnel.h> 37 #include <drm/drm_atomic.h> 38 #include <drm/drm_atomic_helper.h> 39 #include <drm/drm_atomic_uapi.h> 40 #include <drm/drm_damage_helper.h> 41 #include <drm/drm_edid.h> 42 #include <drm/drm_fixed.h> 43 #include <drm/drm_fourcc.h> 44 #include <drm/drm_print.h> 45 #include <drm/drm_probe_helper.h> 46 #include <drm/drm_rect.h> 47 #include <drm/drm_vblank.h> 48 49 #include "g4x_dp.h" 50 #include "g4x_hdmi.h" 51 #include "hsw_ips.h" 52 #include "i915_config.h" 53 #include "i915_drv.h" 54 #include "i915_reg.h" 55 #include "i9xx_plane.h" 56 #include "i9xx_plane_regs.h" 57 #include "i9xx_wm.h" 58 #include "intel_alpm.h" 59 #include "intel_atomic.h" 60 #include "intel_audio.h" 61 #include "intel_bo.h" 62 #include "intel_bw.h" 63 #include "intel_casf.h" 64 #include "intel_cdclk.h" 65 #include "intel_clock_gating.h" 66 #include "intel_color.h" 67 #include "intel_crt.h" 68 #include "intel_crtc.h" 69 #include "intel_crtc_state_dump.h" 70 #include "intel_cursor.h" 71 #include "intel_cursor_regs.h" 72 #include "intel_cx0_phy.h" 73 #include "intel_ddi.h" 74 #include "intel_de.h" 75 #include "intel_display_driver.h" 76 #include "intel_display_power.h" 77 #include "intel_display_regs.h" 78 #include "intel_display_rpm.h" 79 #include "intel_display_types.h" 80 #include "intel_display_utils.h" 81 #include "intel_display_wa.h" 82 #include "intel_dmc.h" 83 #include "intel_dp.h" 84 #include "intel_dp_link_training.h" 85 #include "intel_dp_mst.h" 86 #include "intel_dp_tunnel.h" 87 #include "intel_dpll.h" 88 #include "intel_dpll_mgr.h" 89 #include "intel_dpt.h" 90 #include "intel_dpt_common.h" 91 #include "intel_drrs.h" 92 #include 
"intel_dsb.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_flipq.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
#include "intel_lt_phy.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pfit.h"
#include "intel_pipe_crc.h"
#include "intel_plane.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_tdf.h"
#include "intel_tv.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
#include "intel_vga.h"
#include "intel_vrr.h"
#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"

/* Forward declarations for helpers defined later in this file. */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
			      const struct intel_crtc_state *crtc_state);

/*
 * "HDR mode": no planes are active on the pipe other than the
 * HDR-capable planes and the cursor.
 */
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct intel_display *display, enum pipe pipe, bool enable)
{
	/* Toggle DUPS1/DUPS2 clock gating disable for the pipe. */
	intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
		     DUPS1_GATING_DIS | DUPS2_GATING_DIS,
		     enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct intel_display *display, enum pipe pipe,
		       bool enable)
{
	/* Toggle DPFR clock gating disable for the pipe. */
	intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
		     DPFR_GATING_DIS,
		     enable ? DPFR_GATING_DIS : 0);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct intel_display *display, enum pipe pipe,
		       bool enable)
{
	/* Toggle cursor clock gating disable for the pipe. */
	intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
		     CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

/* A sync slave has a valid master transcoder assigned. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

/* A sync master has at least one slave in its mask. */
bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

/* True if the crtc participates in port sync as either master or slave. */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

/* The lowest pipe in joiner_pipes acts as the joiner primary. */
static enum pipe joiner_primary_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->joiner_pipes) - 1;
}

/*
 * The following helper functions, despite being named for bigjoiner,
 * are applicable to both bigjoiner and uncompressed joiner configurations.
 */
static bool is_bigjoiner(const struct intel_crtc_state *crtc_state)
{
	/* Any joiner config involves at least two pipes. */
	return hweight8(crtc_state->joiner_pipes) >= 2;
}

/* Primary pipes sit at every other bit position starting at the first primary. */
static u8 bigjoiner_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!is_bigjoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b01010101 << joiner_primary_pipe(crtc_state));
}

/*
 * Secondary pipes are the odd offsets from the first primary.
 * NOTE(review): returns unsigned int while bigjoiner_primary_pipes()
 * returns u8 — presumably both should be u8; confirm and unify.
 */
static unsigned int bigjoiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!is_bigjoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b10101010 << joiner_primary_pipe(crtc_state));
}

bool intel_crtc_is_bigjoiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return false;

	return BIT(crtc->pipe) & bigjoiner_primary_pipes(crtc_state);
}

bool intel_crtc_is_bigjoiner_secondary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return false;

	return BIT(crtc->pipe) & bigjoiner_secondary_pipes(crtc_state);
}

/* Pipes acting as modeset primaries: the crtc's own pipe when not joined. */
u8 _intel_modeset_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return BIT(crtc->pipe);

	return bigjoiner_primary_pipes(crtc_state);
}

u8 _intel_modeset_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	return bigjoiner_secondary_pipes(crtc_state);
}

/* Ultrajoiner joins four (or more) pipes. */
bool intel_crtc_is_ultrajoiner(const struct intel_crtc_state *crtc_state)
{
	return intel_crtc_num_joined_pipes(crtc_state) >= 4;
}

/* Ultrajoiner primaries sit at every fourth bit from the first primary. */
static u8 ultrajoiner_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!intel_crtc_is_ultrajoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes &
		(0b00010001 << joiner_primary_pipe(crtc_state));
}

bool intel_crtc_is_ultrajoiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return intel_crtc_is_ultrajoiner(crtc_state) &&
		BIT(crtc->pipe) & ultrajoiner_primary_pipes(crtc_state);
}

/*
 * The ultrajoiner enable bit doesn't seem to follow primary/secondary logic or
 * any other logic, so lets just add helper function to
 * at least hide this hassle..
 */
static u8 ultrajoiner_enable_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!intel_crtc_is_ultrajoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b01110111 << joiner_primary_pipe(crtc_state));
}

bool intel_crtc_ultrajoiner_enable_needed(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return intel_crtc_is_ultrajoiner(crtc_state) &&
		BIT(crtc->pipe) & ultrajoiner_enable_pipes(crtc_state);
}

/* All joined pipes except the primary; 0 when not joined. */
u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->joiner_pipes)
		return crtc_state->joiner_pipes & ~BIT(joiner_primary_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_joiner_secondary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe != joiner_primary_pipe(crtc_state);
}

bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe == joiner_primary_pipe(crtc_state);
}

/* Number of pipes in this crtc's joiner set (1 when not joined). */
int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(intel_crtc_joined_pipe_mask(crtc_state));
}

/* Mask of all joined pipes, always including the crtc's own pipe. */
u8
intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return BIT(crtc->pipe) | crtc_state->joiner_pipes;
}

/* Resolve the primary crtc of a (possibly joined) crtc state. */
struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (intel_crtc_is_joiner_secondary(crtc_state))
		return intel_crtc_for_pipe(display, joiner_primary_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

/* Wait for the pipe to fully stop after disabling the transcoder. */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(old_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);

	if (DISPLAY_VER(display) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear_ms(display, TRANSCONF(display, cpu_transcoder),
					       TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(display->drm, 1, "pipe_off wait timed out\n");
	} else {
		/* Pre-gen4 has no pipe state bit; watch the scanline instead. */
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/*
 * Warn if the transcoder's hardware enable state does not match the
 * expected @state. Takes a power reference only if the domain is already
 * enabled; otherwise the transcoder is treated as off.
 */
void assert_transcoder(struct intel_display *display,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (display->platform.i830)
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(display, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(display,
					TRANSCONF(display, cpu_transcoder));
		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(display, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "transcoder %s assertion failure (expected %s, current %s)\n",
				 transcoder_name(cpu_transcoder), str_on_off(state),
				 str_on_off(cur_state));
}

/* Warn if the plane's hardware enable state does not match @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
	struct intel_display *display = to_intel_display(plane->base.dev);
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "%s assertion failure (expected %s, current %s)\n",
				 plane->base.name, str_on_off(state),
				 str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

/* Assert that every plane on the crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(display->drm, crtc, plane)
		assert_plane_disabled(plane);
}

/* Enable the CPU transcoder/pipe, applying platform workarounds. */
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(display->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(display)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(display);
		else
			assert_pll_enabled(display, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(display,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(display,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(display) == 13)
		intel_de_rmw(display, PIPE_ARB_CTL(display, pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	if (DISPLAY_VER(display) >= 14) {
		u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
		u32 set = 0;

		/* DP FEC BS jitter workaround applies to display 14 only. */
		if (DISPLAY_VER(display) == 14)
			set |= DP_FEC_BS_JITTER_WA;

		intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
			     clear, set);
	}

	val = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(display->drm, !display->platform.i830);
		return;
	}

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(display) >= 13 &&
	    new_crtc_state->dsc.compression_enable) {
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
		val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
				      TRANSCONF_PIXEL_COUNT_SCALING_X4);
	}

	intel_de_write(display, TRANSCONF(display, cpu_transcoder),
		       val | TRANSCONF_ENABLE);
	intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

/* Disable the CPU transcoder/pipe and wait for it to stop. */
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(old_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(display->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	val = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!display->platform.i830)
		val &= ~TRANSCONF_ENABLE;

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(display) >= 13 &&
	    old_crtc_state->dsc.compression_enable)
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;

	intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);

	if (DISPLAY_VER(display) >= 12)
		intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	/* Only wait if we actually cleared the enable bit (not i830). */
	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

/*
 * Max framebuffer stride supported by any plane, used as a conservative
 * upper bound for framebuffer creation. Returns 0 with no crtcs.
 */
u32 intel_plane_fb_max_stride(struct intel_display *display,
			      const struct drm_format_info *info,
			      u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all,
	 * if in case pipe A is disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_first_crtc(display);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, info, modifier,
				 DRM_MODE_ROTATE_0);
}

/* Max stride for dumb buffers; 0 when the device has no display. */
u32 intel_dumb_fb_max_stride(struct drm_device *drm,
			     u32 pixel_format, u64 modifier)
{
	struct intel_display *display = to_intel_display(drm);

	if (!HAS_DISPLAY(display))
		return 0;

	return intel_plane_fb_max_stride(display,
					 drm_get_format_info(drm, pixel_format, modifier),
					 modifier);
}

/* Update plane visibility and the crtc's uapi plane_mask together. */
void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

/* Rebuild enabled_planes/active_planes from the uapi plane_mask. */
void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, display->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

/*
 * Disable a plane outside the atomic commit machinery (e.g. during
 * initial hardware takeover), updating the cached state to match.
 */
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(display->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_plane_set_invisible(crtc_state, plane_state);
	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);

	skl_wm_plane_disable_noatomic(crtc, plane);

	/* IPS must be off while only the cursor (or nothing) remains. */
	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_plane_initial_vblank_wait(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(display) &&
	    intel_set_memory_cxsr(display, false))
		intel_plane_initial_vblank_wait(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(display) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, false);

	intel_plane_disable_arm(NULL, plane, crtc_state);
	intel_plane_initial_vblank_wait(crtc);
}

/* Y offset of the fence-aligned view of plane 0 of the framebuffer. */
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}

/* Program per-pipe chicken bits (icl+ workarounds). */
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(display, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (display->platform.dg2)
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if ((DISPLAY_VER(display) >= 13) && (DISPLAY_VER(display) < 30))
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (display->platform.dg2)
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(display, PIPE_CHICKEN(pipe), tmp);
}

/*
 * Returns true (after waiting a vblank on that crtc) if any crtc still
 * has a commit whose cleanup has not completed, i.e. fb unpin pending.
 */
bool intel_has_pending_fb_unpin(struct intel_display *display)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, display->drm) {
		struct drm_crtc_commit *commit;
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *primary_crtc;
	int num_encoders = 0;
	int i;

	primary_crtc = intel_primary_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &primary_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	/* Exactly one encoder is expected; warn otherwise. */
	drm_WARN(state->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(primary_crtc->pipe));

	return encoder;
}

/* Turn off the legacy overlay when the crtc is being shut down via DPMS. */
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

/* WA Display #0827: Gen9:all — needed whenever NV12 planes are in use. */
static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(display) == 9)
		return true;

	return false;
}

/* Wa_2006604312: needed on display 11 whenever any scaler is in use. */
static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(display) == 11)
		return true;

	return false;
}

/* Wa_1604331009: display 11, HDR mode with an active cursor. */
static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(display) == 11)
		return true;

	return false;
}

/* Program the plane stretch-max chicken bits for async flips with VT-d. */
static void intel_async_flip_vtd_wa(struct intel_display *display,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(display) == 9) {
		/*
		 * "Plane N stretch max must be programmed to 11b (x1)
		 * when Async flips are enabled on that plane."
		 */
		intel_de_rmw(display, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(display, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}

/* Async flip + active VT-d needs the stretch-max WA on gen9/BDW/HSW. */
static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	return crtc_state->uapi.async_flip && intel_display_vtd_active(display) &&
		(DISPLAY_VER(display) == 9 || display->platform.broadwell ||
		 display->platform.haswell);
}

/* Call ->audio_enable on every encoder feeding this crtc in the new state. */
static void intel_encoders_audio_enable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_enable)
			encoder->audio_enable(encoder, crtc_state, conn_state);
	}
}

/* Call ->audio_disable on every encoder that fed this crtc in the old state. */
static void intel_encoders_audio_disable(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_disable)
			encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
	}
}

/* A feature is "enabling" if it's on in the new state and was off (or a full modeset). */
#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
/* A feature is "disabling" if it was on and is now off (or a full modeset). */
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))

/* True when planes go from none active to some active on this crtc. */
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

/* True when planes go from some active to none active on this crtc. */
static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

/* Any change in the VRR timing parameters between the two states. */
static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline ||
		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full ||
		old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start ||
		old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end;
}

/* Any change in the CMRR M/N values between the two states. */
static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
				const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->cmrr.cmrr_m != new_crtc_state->cmrr.cmrr_m ||
		old_crtc_state->cmrr.cmrr_n != new_crtc_state->cmrr.cmrr_n;
}

/*
 * VRR must be (re-)enabled: either it's turning on, or it stays on but
 * M/N, LRR or the VRR timing parameters are being updated.
 */
static bool intel_crtc_vrr_enabling(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

/*
 * VRR must be disabled first: either it's turning off, or it was on and
 * M/N, LRR or the VRR timing parameters are being updated.
 */
bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

/* Audio needs enabling: turning on, or staying on with a changed ELD. */
static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
			   const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

/* Audio needs disabling: turning off, or was on with a changed ELD. */
static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

/*
 * CASF (sharpness filter) is being enabled on this crtc.
 * NOTE(review): parameter order here is (new, old), the reverse of the
 * other helpers above — confirm callers pass arguments accordingly.
 */
static bool intel_casf_enabling(const struct intel_crtc_state *new_crtc_state,
				const struct intel_crtc_state *old_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state);
}

/* CASF (sharpness filter) is being disabled on this crtc. */
static bool intel_casf_disabling(const struct intel_crtc_state *old_crtc_state,
				 const struct intel_crtc_state *new_crtc_state)
{
	if
 (!new_crtc_state->hw.active)
		return false;

	return is_disabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state);
}

#undef is_disabling
#undef is_enabling

/*
 * Work done after the planes of @crtc have been (re)programmed for this
 * commit: frontbuffer flips, post-update watermarks/FBC, tearing down
 * workarounds the old state needed but the new one does not, color and
 * audio post-update, and ALPM/PSR post-plane handling.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(display, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(display);

	intel_fbc_post_update(state, crtc);

	/* Disable workarounds that were needed by the old state only. */
	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(display, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(display, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(display, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(display, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);

	if (audio_enabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_enable(state, crtc);

	/* Wa_14011503117: unmask scaler ECC when the panel fitter state changed. */
	if (intel_display_wa(display, 14011503117)) {
		if (old_crtc_state->pch_pfit.enabled != new_crtc_state->pch_pfit.enabled)
			adl_scaler_ecc_unmask(new_crtc_state);
	}

	intel_alpm_post_plane_update(state, crtc);

	intel_psr_post_plane_update(state, crtc);
}

/*
 * Post-plane-update work that must run only after hardware state
 * readout has completed for this commit.
 */
static void intel_post_plane_update_after_readout(struct intel_atomic_state *state,
						  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
	hsw_ips_post_update(state, crtc);

	/*
	 * Activate DRRS after state readout to avoid
	 * dp_m_n vs. dp_m2_n2 confusion on BDW+.
	 */
	intel_drrs_activate(new_crtc_state);
}

/*
 * Arm flip-done signalling on every plane of @crtc that is updated by
 * this commit.
 */
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

/*
 * Disarm flip-done signalling on every plane of @crtc that was updated
 * by this commit.
 */
static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}

/*
 * WA: for planes with need_async_flip_toggle_wa set, explicitly clear the
 * async flip bit (keeping the rest of the old plane state) for planes
 * leaving async flip mode, then wait a vblank for it to latch.
 */
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 disable_async_flip_planes =
		old_crtc_state->async_flip_planes &
		~new_crtc_state->async_flip_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_toggle_wa &&
		    plane->pipe == crtc->pipe &&
		    disable_async_flip_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			intel_plane_async_flip(NULL, plane,
					       old_crtc_state, old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}

/*
 * Work done before the planes of @crtc are (re)programmed for this
 * commit: PSR/ALPM preparation, VRR/audio/CASF/DRRS teardown, arming
 * workarounds the new state needs, CxSR disabling, intermediate
 * watermark programming and underrun-reporting adjustments. The order
 * of the steps below is significant.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_alpm_pre_plane_update(state, crtc);
	intel_psr_pre_plane_update(state, crtc);

	if (intel_crtc_vrr_disabling(state, crtc)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	if (audio_disabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_disable(state, crtc);

	if (intel_casf_disabling(old_crtc_state, new_crtc_state))
		intel_casf_disable(new_crtc_state);

	intel_drrs_deactivate(old_crtc_state);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(display, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(display, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(display, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(display, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(display) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(display, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (!HAS_GMCH(display) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && ilk_disable_cxsr(display))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(display);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(display) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(display, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

/*
 * Disarm every plane of @crtc touched by this commit and flush the
 * frontbuffer bits of the planes that were visible.
 */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(NULL, plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(display, fb_bits);
}

/* Carry DPLL state over into fastset (non-modeset) CRTC states. */
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (display->dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			new_crtc_state->intel_dpll = old_crtc_state->intel_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}
}

/* Call the ->pre_pll_enable() hook of every encoder driving @crtc. */
static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}

/* Call the ->pre_enable() hook of every encoder driving @crtc. */
static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}

/*
 * Call the ->enable() hook of every encoder driving @crtc and notify
 * OpRegion of the encoders becoming active.
 */
static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

/*
 * Notify OpRegion of the encoders going inactive and call the
 * ->disable() hook of every encoder that was driving @crtc.
 */
static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
	}
}

/* Call the ->post_disable() hook of every encoder that was driving @crtc. */
static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}

/* Call the ->post_pll_disable() hook of every encoder that was driving @crtc. */
static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}

/* Call the ->update_pipe() (fastset) hook of every encoder driving @crtc. */
static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
	}
}

/*
 * Program the ILK CPU transcoder: M/N values (FDI or DP), transcoder
 * timings and pipeconf.
 */
static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}

/* Full ILK-era pipe enable sequence; step order is hardware-mandated. */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(display->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
	intel_set_pch_fifo_underrun_reporting(display, pipe, false);

	ilk_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		ilk_pch_pre_enable(state, crtc);
	} else {
		assert_fdi_tx_disabled(display, pipe);
		assert_fdi_rx_disabled(display, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_modeset(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(display))
		intel_wait_for_pipe_scanline_moving(crtc);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);
	}
	intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
	intel_set_pch_fifo_underrun_reporting(display, pipe, true);
}

/* Display WA #1180: WaDisableScalarClockGating: glk */
static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	return DISPLAY_VER(display) == 10 && crtc_state->pch_pfit.enabled;
}

/* Toggle the scaler clock-gating-disable bits for the pipe (WA #1180). */
static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable)
{
	struct intel_display *display = to_intel_display(crtc);
	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;

	intel_de_rmw(display, CLKGATE_DIS_PSL(crtc->pipe),
		     mask, enable ? mask : 0);
}

/* Program the pipe's linetime and IPS linetime watermarks. */
static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	intel_de_write(display, WM_LINETIME(crtc->pipe),
		       HSW_LINETIME(crtc_state->linetime) |
		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
}

/* Program the transcoder frame start delay (register is 0-based). */
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	intel_de_rmw(display, CHICKEN_TRANS(display, crtc_state->cpu_transcoder),
		     HSW_FRAME_START_DELAY_MASK,
		     HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}

/*
 * Program the HSW+ CPU transcoder: M/N values (FDI or DP), transcoder
 * and VRR timings, pixel multiplier (not on eDP), frame start delay and
 * transconf.
 */
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);
	intel_vrr_set_transcoder_timings(crtc_state);

	if (cpu_transcoder != TRANSCODER_EDP)
		intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
			       crtc_state->pixel_multiplier - 1);

	hsw_set_frame_start_delay(crtc_state);

	hsw_set_transconf(crtc_state);
}

/*
 * Full HSW+ pipe enable sequence, covering all pipes joined with @crtc;
 * step order is hardware-mandated.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state
		*new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	struct intel_crtc *pipe_crtc;
	int i;

	if (drm_WARN_ON(display->drm, crtc->active))
		return;

	for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
		const struct intel_crtc_state *new_pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		intel_dmc_enable_pipe(new_pipe_crtc_state);
	}

	intel_encoders_pre_pll_enable(state, crtc);

	if (new_crtc_state->intel_dpll)
		intel_dpll_enable(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		intel_dsc_enable(pipe_crtc_state);

		if (HAS_UNCOMPRESSED_JOINER(display))
			intel_uncompressed_joiner_enable(pipe_crtc_state);

		intel_set_pipe_src_size(pipe_crtc_state);

		if (DISPLAY_VER(display) >= 9 || display->platform.broadwell)
			bdw_set_pipe_misc(NULL, pipe_crtc_state);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		hsw_configure_cpu_transcoder(new_crtc_state);

	for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		pipe_crtc->active = true;

		if (glk_need_scaler_clock_gating_wa(pipe_crtc_state))
			glk_pipe_scaler_clock_gating_wa(pipe_crtc, true);

		if (DISPLAY_VER(display) >= 9)
			skl_pfit_enable(pipe_crtc_state);
		else
			ilk_pfit_enable(pipe_crtc_state);

		/*
		 * On ILK+ LUT must be loaded before the pipe is running but with
		 * clocks enabled
		 */
		intel_color_modeset(pipe_crtc_state);

		hsw_set_linetime_wm(pipe_crtc_state);

		if (DISPLAY_VER(display) >= 11)
			icl_set_pipe_chicken(pipe_crtc_state);

		intel_initial_watermarks(state, pipe_crtc);
	}

	intel_encoders_enable(state, crtc);

	for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);
		enum pipe hsw_workaround_pipe;

		if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) {
			intel_crtc_wait_for_next_vblank(pipe_crtc);
			glk_pipe_scaler_clock_gating_wa(pipe_crtc, false);
		}

		/*
		 * If we change the relative order between pipe/planes
		 * enabling, we need to change the workaround.
		 */
		hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe;
		if (display->platform.haswell && hsw_workaround_pipe != INVALID_PIPE) {
			struct intel_crtc *wa_crtc =
				intel_crtc_for_pipe(display, hsw_workaround_pipe);

			intel_crtc_wait_for_next_vblank(wa_crtc);
			intel_crtc_wait_for_next_vblank(wa_crtc);
		}
	}
}

/* Full ILK-era pipe disable sequence; step order is hardware-mandated. */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
	intel_set_pch_fifo_underrun_reporting(display, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_disable(state, crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_post_disable(state, crtc);

	intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
	intel_set_pch_fifo_underrun_reporting(display, pipe, true);
}

/*
 * HSW+ pipe disable sequence; the encoder hooks do the bulk of the
 * teardown, then the DPLL and per-pipe DMC are disabled.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc *pipe_crtc;
	int i;

	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);

	intel_dpll_disable(old_crtc_state);

	intel_encoders_post_pll_disable(state, crtc);

	for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
		const struct intel_crtc_state *old_pipe_crtc_state =
			intel_atomic_get_old_crtc_state(state, pipe_crtc);

		intel_dmc_disable_pipe(old_pipe_crtc_state);
	}
}

/* Prefer intel_encoder_is_combo() */
bool intel_phy_is_combo(struct intel_display *display, enum phy phy)
{
	if (phy == PHY_NONE)
		return false;
	else if (display->platform.alderlake_s)
		return phy <= PHY_E;
	else if (display->platform.dg1 || display->platform.rocketlake)
		return phy <= PHY_D;
	else if (display->platform.jasperlake || display->platform.elkhartlake)
		return phy <= PHY_C;
	else if (display->platform.alderlake_p || IS_DISPLAY_VER(display, 11, 12))
		return phy <= PHY_B;
	else
		/*
		 * DG2 outputs labelled as "combo PHY" in the bspec use
		 * SNPS PHYs with completely different programming,
		 * hence we always return false here.
		 */
		return false;
}

/* Prefer intel_encoder_is_tc() */
bool intel_phy_is_tc(struct intel_display *display, enum phy phy)
{
	/*
	 * Discrete GPU phy's are not attached to FIA's to support TC
	 * subsystem Legacy or non-legacy, and only support native DP/HDMI
	 */
	if (display->platform.dgfx)
		return false;

	if (DISPLAY_VER(display) >= 13)
		return phy >= PHY_F && phy <= PHY_I;
	else if (display->platform.tigerlake)
		return phy >= PHY_D && phy <= PHY_I;
	else if (display->platform.icelake)
		return phy >= PHY_C && phy <= PHY_F;

	return false;
}

/* Prefer intel_encoder_is_snps() */
bool intel_phy_is_snps(struct intel_display *display, enum phy phy)
{
	/*
	 * For DG2, and for DG2 only, all four "combo" ports and the TC1 port
	 * (PHY E) use Synopsis PHYs. See intel_phy_is_tc().
	 */
	return display->platform.dg2 && phy > PHY_NONE && phy <= PHY_E;
}

/* Prefer intel_encoder_to_phy() */
enum phy intel_port_to_phy(struct intel_display *display, enum port port)
{
	if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD)
		return PHY_D + port - PORT_D_XELPD;
	else if (DISPLAY_VER(display) >= 13 && port >= PORT_TC1)
		return PHY_F + port - PORT_TC1;
	else if (display->platform.alderlake_s && port >= PORT_TC1)
		return PHY_B + port - PORT_TC1;
	else if ((display->platform.dg1 || display->platform.rocketlake) && port >= PORT_TC1)
		return PHY_C + port - PORT_TC1;
	else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		 port == PORT_D)
		return PHY_A;

	/* Default 1:1 port -> PHY mapping. */
	return PHY_A + port - PORT_A;
}

/* Prefer intel_encoder_to_tc() */
/*
 * Return TC_PORT_1..I915_MAX_TC_PORTS for any TypeC DDI port. The function
 * can be also called for TypeC DDI ports not connected to a TypeC PHY such as
 * the PORT_TC1..4 ports on RKL/ADLS/BMG.
 */
enum tc_port intel_port_to_tc(struct intel_display *display, enum port port)
{
	if (DISPLAY_VER(display) >= 12)
		return TC_PORT_1 + port - PORT_TC1;
	else
		return TC_PORT_1 + port - PORT_C;
}

/*
 * Return TC_PORT_1..I915_MAX_TC_PORTS for TypeC DDI ports connected to a TypeC PHY.
 * Note that on RKL, ADLS, BMG the PORT_TC1..4 ports are connected to a non-TypeC
 * PHY, so on those platforms the function returns TC_PORT_NONE.
 */
enum tc_port intel_tc_phy_port_to_tc(struct intel_display *display, enum port port)
{
	if (!intel_phy_is_tc(display, intel_port_to_phy(display, port)))
		return TC_PORT_NONE;

	return intel_port_to_tc(display, port);
}

/* PHY of the port driven by @encoder. */
enum phy intel_encoder_to_phy(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	return intel_port_to_phy(display, encoder->port);
}

/* Is @encoder's port on a combo PHY? */
bool intel_encoder_is_combo(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	return intel_phy_is_combo(display, intel_encoder_to_phy(encoder));
}

/* Is @encoder's port on a SNPS (Synopsys) PHY? */
bool intel_encoder_is_snps(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	return intel_phy_is_snps(display, intel_encoder_to_phy(encoder));
}

/* Is @encoder's port on a TypeC PHY? */
bool intel_encoder_is_tc(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	return intel_phy_is_tc(display, intel_encoder_to_phy(encoder));
}

/* TC port of @encoder, or TC_PORT_NONE if not on a TypeC PHY. */
enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	return intel_tc_phy_port_to_tc(display, encoder->port);
}

/*
 * Power domain needed for AUX transactions on @dig_port's AUX channel,
 * distinguishing TBT-alt mode from legacy/native mode.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct intel_display *display =
		to_intel_display(dig_port);

	if (intel_tc_port_in_tbt_alt_mode(dig_port))
		return intel_display_power_tbt_aux_domain(display, dig_port->aux_ch);

	return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
}

/*
 * Compute the full set of power domains @crtc_state needs into @mask:
 * pipe, transcoder, panel fitter, every encoder on the CRTC, audio MMIO,
 * display core (when a DPLL is used) and DSC. Empty if the CRTC is off.
 */
static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
				   struct intel_power_domain_mask *mask)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;

	bitmap_zero(mask->bits, POWER_DOMAIN_NUM);

	if (!crtc_state->hw.active)
		return;

	set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
	set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);

	drm_for_each_encoder_mask(encoder, display->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		set_bit(intel_encoder->power_domain, mask->bits);
	}

	if (HAS_DDI(display) && crtc_state->has_audio)
		set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);

	if (crtc_state->intel_dpll)
		set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);

	if (crtc_state->dsc.compression_enable)
		set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}

/*
 * Grab the power domains newly needed by @crtc_state and report the ones
 * no longer needed in @old_domains (to be released later by
 * intel_modeset_put_crtc_power_domains()).
 */
void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
					  struct intel_power_domain_mask *old_domains)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum intel_display_power_domain domain;
	struct intel_power_domain_mask domains, new_domains;

	get_crtc_power_domains(crtc_state, &domains);

	/* new_domains = needed now but not yet held */
	bitmap_andnot(new_domains.bits,
		      domains.bits,
		      crtc->enabled_power_domains.mask.bits,
		      POWER_DOMAIN_NUM);
	/* old_domains = held but no longer needed */
	bitmap_andnot(old_domains->bits,
		      crtc->enabled_power_domains.mask.bits,
		      domains.bits,
		      POWER_DOMAIN_NUM);

	for_each_power_domain(domain, &new_domains)
		intel_display_power_get_in_set(display,
					       &crtc->enabled_power_domains,
					       domain);
}

/* Release the power domains in @domains held by @crtc. */
void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
					  struct intel_power_domain_mask *domains)
{
	struct intel_display *display = to_intel_display(crtc);

	intel_display_power_put_mask_in_set(display,
					    &crtc->enabled_power_domains,
					    domains);
}

/*
 * Program the i9xx CPU transcoder: DP M/N values, transcoder timings
 * and pipeconf.
 */
static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	i9xx_set_pipeconf(crtc_state);
}

/* VLV/CHV pipe enable sequence; step order is hardware-mandated. */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(display->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	intel_de_write(display, VLV_PIPE_MSA_MISC(display, pipe), 0);

	if (display->platform.cherryview && pipe == PIPE_B) {
		intel_de_write(display, CHV_BLEND(display, pipe),
			       CHV_BLEND_LEGACY);
		intel_de_write(display, CHV_CANVAS(display, pipe), 0);
	}

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(display, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (display->platform.cherryview)
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_modeset(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}

/* Gen2-4 pipe enable sequence; step order is hardware-mandated. */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(display->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	if (DISPLAY_VER(display) != 2)
		intel_set_cpu_fifo_underrun_reporting(display, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_modeset(new_crtc_state);

	if (!intel_initial_watermarks(state, crtc))
		intel_update_watermarks(display);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(display) == 2)
		intel_crtc_wait_for_next_vblank(crtc);
}

/* Gen2-4 pipe disable sequence (continues beyond this view). */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct
			      intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(display) == 2)
		intel_crtc_wait_for_next_vblank(crtc);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI outputs keep the PLL running; everything else shuts it down */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (display->platform.cherryview)
			chv_disable_pll(display, pipe);
		else if (display->platform.valleyview)
			vlv_disable_pll(display, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	if (DISPLAY_VER(display) != 2)
		intel_set_cpu_fifo_underrun_reporting(display, pipe, false);

	if (!display->funcs.wm->initial_watermarks)
		intel_update_watermarks(display);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (display->platform.i830)
		i830_enable_pipe(display, pipe);
}

/* drm encoder .destroy callback: clean up and free the encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return HAS_DOUBLE_WIDE(display) &&
		(crtc->pipe == PIPE_A || display->platform.i915g);
}

/*
 * Pixel rate through the pipe, scaled up to account for PCH panel
 * fitter downscaling when it is enabled.
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
	struct drm_rect src;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (!crtc_state->pch_pfit.enabled)
		return pixel_rate;

	/* 16.16 fixed point source rectangle for the scaler ratio */
	drm_rect_init(&src, 0, 0,
		      drm_rect_width(&crtc_state->pipe_src) << 16,
		      drm_rect_height(&crtc_state->pipe_src) << 16);

	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
				   pixel_rate);
}

/* Copy the crtc_* timings of @timings into the normal timings of @mode. */
static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
					 const struct drm_display_mode *timings)
{
	mode->hdisplay = timings->crtc_hdisplay;
	mode->htotal = timings->crtc_htotal;
	mode->hsync_start = timings->crtc_hsync_start;
	mode->hsync_end = timings->crtc_hsync_end;

	mode->vdisplay = timings->crtc_vdisplay;
	mode->vtotal = timings->crtc_vtotal;
	mode->vsync_start = timings->crtc_vsync_start;
	mode->vsync_end = timings->crtc_vsync_end;

	mode->flags = timings->flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = timings->crtc_clock;

	drm_mode_set_name(mode);
}

static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (HAS_GMCH(display))
		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
		crtc_state->pixel_rate =
			crtc_state->hw.pipe_mode.crtc_clock;
	else
		crtc_state->pixel_rate =
			ilk_pipe_pixel_rate(crtc_state);
}

/* Scale full joined-pipes horizontal timings down to one pipe's share. */
static void intel_joiner_adjust_timings(const struct intel_crtc_state *crtc_state,
					struct drm_display_mode *mode)
{
	int num_pipes = intel_crtc_num_joined_pipes(crtc_state);

	if (num_pipes == 1)
		return;

	mode->crtc_clock /= num_pipes;
	mode->crtc_hdisplay /= num_pipes;
	mode->crtc_hblank_start /= num_pipes;
	mode->crtc_hblank_end /= num_pipes;
	mode->crtc_hsync_start /= num_pipes;
	mode->crtc_hsync_end /= num_pipes;
	mode->crtc_htotal /= num_pipes;
}

/* Expand eDP MSO per-segment timings into full-mode timings. */
static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
					  struct drm_display_mode *mode)
{
	int overlap = crtc_state->splitter.pixel_overlap;
	int n = crtc_state->splitter.link_count;

	if (!crtc_state->splitter.enable)
		return;

	/*
	 * eDP MSO uses segment timings from EDID for transcoder
	 * timings, but full mode for everything else.
	 *
	 * h_full = (h_segment - pixel_overlap) * link_count
	 */
	mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
	mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
	mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
	mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
	mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
	mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
	mode->crtc_clock *= n;
}

/*
 * During state readout, derive hw.mode, hw.pipe_mode and the normal
 * timings of hw.adjusted_mode from the raw transcoder (crtc_*) timings.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/*
	 * We want the full numbers in adjusted_mode normal timings,
	 * adjusted_mode crtc timings are left with the raw transcoder
	 * timings.
	 */
	intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);

	/* Populate the "user" mode with full numbers */
	drm_mode_copy(mode, pipe_mode);
	intel_mode_from_crtc_timings(mode, mode);
	mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
		intel_crtc_num_joined_pipes(crtc_state);
	mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);

	/* Derive per-pipe timings in case joiner is used */
	intel_joiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	intel_crtc_compute_pixel_rate(crtc_state);
}

/* Run the encoder's state readout hook and derive dependent crtc state. */
void intel_encoder_get_config(struct intel_encoder *encoder,
			      struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}

/* Shrink pipe_src to a single pipe's horizontal share when joined. */
static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
	int num_pipes = intel_crtc_num_joined_pipes(crtc_state);
	int width, height;

	if (num_pipes == 1)
		return;

	width = drm_rect_width(&crtc_state->pipe_src);
	height = drm_rect_height(&crtc_state->pipe_src);

	drm_rect_init(&crtc_state->pipe_src, 0, 0,
		      width / num_pipes, height);
}

/* Compute and validate pipe_src; rejects odd widths where hw requires even. */
static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	intel_joiner_compute_pipe_src(crtc_state);

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (drm_rect_width(&crtc_state->pipe_src) & 1) {
		if (crtc_state->double_wide) {
			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
				    crtc->base.base.id, crtc->base.name);
			return -EINVAL;
		}

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(display)) {
			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
				    crtc->base.base.id, crtc->base.name);
			return -EINVAL;
		}
	}

	return 0;
}

/* Derive hw.pipe_mode from adjusted_mode and validate the dotclock limit. */
static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	int clock_limit = display->cdclk.max_dotclk_freq;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/* Derive per-pipe timings in case joiner is used */
	intel_joiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(display) < 4) {
		clock_limit = display->cdclk.max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = display->cdclk.max_dotclk_freq;
			crtc_state->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    crtc->base.base.id, crtc->base.name,
			    pipe_mode->crtc_clock, clock_limit,
			    str_yes_no(crtc_state->double_wide));
		return -EINVAL;
	}

	return 0;
}

/*
 * Minimum set context latency (SCL) required by the crtc state.
 * Currently only PSR contributes, and only on platforms with a DSB.
 */
static int intel_crtc_set_context_latency(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	int set_context_latency = 0;

	if (!HAS_DSB(display))
		return 0;

	set_context_latency = max(set_context_latency,
				  intel_psr_min_set_context_latency(crtc_state));

	return set_context_latency;
}

/*
 * Validate the SCL against the available vblank length and bake it into
 * adjusted_mode->crtc_vblank_start (the delayed vblank start).
 */
static int intel_crtc_compute_set_context_latency(struct intel_atomic_state *state,
						  struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int set_context_latency, max_vblank_delay;

	set_context_latency = intel_crtc_set_context_latency(crtc_state);

	max_vblank_delay = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start - 1;

	if (set_context_latency > max_vblank_delay) {
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] set context latency (%d) exceeds max (%d)\n",
			    crtc->base.base.id, crtc->base.name,
			    set_context_latency,
			    max_vblank_delay);
		return -EINVAL;
	}

	crtc_state->set_context_latency = set_context_latency;
	adjusted_mode->crtc_vblank_start += set_context_latency;

	return 0;
}

/* Top-level crtc compute step: clocks, SCL, pipe_src, pipe_mode, FDI/VRR. */
static int intel_crtc_compute_config(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	ret = intel_dpll_crtc_compute_clock(state, crtc);
	if (ret)
		return ret;

	ret = intel_crtc_compute_set_context_latency(state, crtc);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_src(crtc_state);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_mode(crtc_state);
	if (ret)
		return ret;

	intel_crtc_compute_pixel_rate(crtc_state);

	if (crtc_state->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, crtc_state);

	intel_vrr_compute_guardband(crtc_state);

	return 0;
}

static void
intel_reduce_m_n_ratio(u32 *num, u32 *den)
{
	/* halve both until each value fits in the M/N register field */
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

/*
 * Compute an M/N divider pair for the ratio m/n. When @constant_n is
 * non-zero it is used as N directly, otherwise N is derived from @n.
 */
static void compute_m_n(u32 *ret_m, u32 *ret_n,
			u32 m, u32 n, u32 constant_n)
{
	if (constant_n)
		*ret_n = constant_n;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

/* Fill @m_n with the DP data and link M/N values for the given link config. */
void
intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
		       int pixel_clock, int link_clock,
		       int bw_overhead,
		       struct intel_link_m_n *m_n)
{
	u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
	u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
						  bw_overhead);
	u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes);

	/*
	 * Windows/BIOS uses fixed M/N values always. Follow suit.
	 *
	 * Also several DP dongles in particular seem to be fussy
	 * about too large link M/N values. Presumably the 20bit
	 * value used by Windows/BIOS is acceptable to everyone.
	 */
	m_n->tu = 64;
	compute_m_n(&m_n->data_m, &m_n->data_n,
		    data_m, data_n,
		    0x8000000);

	compute_m_n(&m_n->link_m, &m_n->link_n,
		    pixel_clock, link_symbol_clock,
		    0x80000);
}

void intel_panel_sanitize_ssc(struct intel_display *display)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) {
		bool bios_lvds_use_ssc = intel_de_read(display,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (display->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(display->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    str_enabled_disabled(bios_lvds_use_ssc),
				    str_enabled_disabled(display->vbt.lvds_use_ssc));
			display->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}

void intel_zero_m_n(struct intel_link_m_n *m_n)
{
	/* corresponds to 0 register value */
	memset(m_n, 0, sizeof(*m_n));
	m_n->tu = 1;
}

/* Write an M/N value set to the given four registers; LINK_N goes last. */
void intel_set_m_n(struct intel_display *display,
		   const struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	intel_de_write(display, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
	intel_de_write(display, data_n_reg, m_n->data_n);
	intel_de_write(display, link_m_reg, m_n->link_m);
	/*
	 * On BDW+ writing LINK_N arms the double buffered update
	 * of all the M/N registers, so it must be written last.
	 */
	intel_de_write(display, link_n_reg, m_n->link_n);
}

bool intel_cpu_transcoder_has_m2_n2(struct intel_display *display,
				    enum transcoder transcoder)
{
	/* HSW has the M2/N2 set only on the eDP transcoder */
	if (display->platform.haswell)
		return transcoder == TRANSCODER_EDP;

	return IS_DISPLAY_VER(display, 5, 7) || display->platform.cherryview;
}

/* Program the primary (M1/N1) link M/N registers for the transcoder. */
void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(display) >= 5)
		intel_set_m_n(display, m_n,
			      PIPE_DATA_M1(display, transcoder),
			      PIPE_DATA_N1(display, transcoder),
			      PIPE_LINK_M1(display, transcoder),
			      PIPE_LINK_N1(display, transcoder));
	else
		intel_set_m_n(display, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}

/* Program the secondary (M2/N2) set; no-op where the hw lacks it. */
void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct intel_display *display = to_intel_display(crtc);

	if (!intel_cpu_transcoder_has_m2_n2(display, transcoder))
		return;

	intel_set_m_n(display, m_n,
		      PIPE_DATA_M2(display, transcoder),
		      PIPE_DATA_N2(display, transcoder),
		      PIPE_LINK_M2(display, transcoder),
		      PIPE_LINK_N2(display, transcoder));
}

/* Does this transcoder have VRR support? (DSI transcoders do not.) */
static bool
transcoder_has_vrr(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	return HAS_VRR(display) && !transcoder_is_dsi(cpu_transcoder);
}

/* Program the full set of transcoder timing registers for a modeset. */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
	int vsyncshift = 0;

	drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vdisplay = adjusted_mode->crtc_vdisplay;
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_start = adjusted_mode->crtc_vblank_start;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/*
	 * VBLANK_START no longer works on ADL+, instead we must use
	 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
	 */
	if (DISPLAY_VER(display) >= 13) {
		intel_de_write(display,
			       TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
			       crtc_state->set_context_latency);

		/*
		 * VBLANK_START not used by hw, just clear it
		 * to make it stand out in register dumps.
		 */
		crtc_vblank_start = 1;
	} else if (DISPLAY_VER(display) == 12) {
		/* VBLANK_START - VACTIVE defines SCL on TGL */
		crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency;
	}

	if (DISPLAY_VER(display) >= 4 && DISPLAY_VER(display) < 35)
		intel_de_write(display,
			       TRANS_VSYNCSHIFT(display, cpu_transcoder),
			       vsyncshift);

	/* registers take 0-based (value - 1) encodings */
	intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder),
		       HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
		       HTOTAL(adjusted_mode->crtc_htotal - 1));
	intel_de_write(display, TRANS_HBLANK(display, cpu_transcoder),
		       HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
		       HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
	intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder),
		       HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
		       HSYNC_END(adjusted_mode->crtc_hsync_end - 1));

	/*
	 * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
	 * bits are not required. Since the support for these bits is going to
	 * be deprecated in upcoming platforms, avoid writing these bits for the
	 * platforms that do not use legacy Timing Generator.
	 */
	if (intel_vrr_always_use_vrr_tg(display))
		crtc_vtotal = 1;

	intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));
	intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
		       VBLANK_START(crtc_vblank_start - 1) |
		       VBLANK_END(crtc_vblank_end - 1));
	intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder),
		       VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
		       VSYNC_END(adjusted_mode->crtc_vsync_end - 1));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits.
	 */
	if (display->platform.haswell && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(display, TRANS_VTOTAL(display, pipe),
			       VACTIVE(crtc_vdisplay - 1) |
			       VTOTAL(crtc_vtotal - 1));

	if (DISPLAY_VER(display) >= 30) {
		/*
		 * Address issues for resolutions with high refresh rate that
		 * have small Hblank, specifically where Hblank is smaller than
		 * one MTP. Simulations indicate this will address the
		 * jitter issues that currently causes BS to be immediately
		 * followed by BE which DPRX devices are unable to handle.
		 * https://groups.vesa.org/wg/DP/document/20494
		 */
		intel_de_write(display, DP_MIN_HBLANK_CTL(cpu_transcoder),
			       crtc_state->min_hblank);
	}
}

/*
 * Reprogram only the vertical timing registers that may change for a
 * low-refresh-rate (LRR) fastset, leaving the rest untouched.
 */
static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;

	drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));

	crtc_vdisplay = adjusted_mode->crtc_vdisplay;
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_start = adjusted_mode->crtc_vblank_start;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;
	}

	if (DISPLAY_VER(display) >= 13) {
		intel_de_write(display,
			       TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
			       crtc_state->set_context_latency);

		/*
		 * VBLANK_START not used by hw, just clear it
		 * to make it stand out in register dumps.
		 */
		crtc_vblank_start = 1;
	} else if (DISPLAY_VER(display) == 12) {
		/* VBLANK_START - VACTIVE defines SCL on TGL */
		crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency;
	}

	/*
	 * The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode.
	 * But let's write it anyway to keep the state checker happy.
	 */
	intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
		       VBLANK_START(crtc_vblank_start - 1) |
		       VBLANK_END(crtc_vblank_end - 1));
	/*
	 * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
	 * bits are not required. Since the support for these bits is going to
	 * be deprecated in upcoming platforms, avoid writing these bits for the
	 * platforms that do not use legacy Timing Generator.
	 */
	if (intel_vrr_always_use_vrr_tg(display))
		crtc_vtotal = 1;

	/*
	 * The double buffer latch point for TRANS_VTOTAL
	 * is the transcoder's undelayed vblank.
	 */
	intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));

	intel_vrr_set_fixed_rr_timings(crtc_state);
	intel_vrr_transcoder_enable(crtc_state);
}

static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int width = drm_rect_width(&crtc_state->pipe_src);
	int height = drm_rect_height(&crtc_state->pipe_src);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	intel_de_write(display, PIPESRC(display, pipe),
		       PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}

/* Read back whether the transcoder is currently running interlaced. */
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* never interlaced on gen2 or display ver 35+ */
	if (DISPLAY_VER(display) == 2 || DISPLAY_VER(display) >= 35)
		return false;

	if (DISPLAY_VER(display) >= 9 ||
	    display->platform.broadwell || display->platform.haswell)
		return intel_de_read(display,
				     TRANSCONF(display, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
	else
		return intel_de_read(display,
				     TRANSCONF(display, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
}

/* Read the transcoder timing registers back into adjusted_mode crtc timings. */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(crtc);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	u32 tmp;

	/* hw encodes all timings as (value - 1), hence the + 1 on readout */
	tmp = intel_de_read(display, TRANS_HTOTAL(display, cpu_transcoder));
	adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(display,
				    TRANS_HBLANK(display, cpu_transcoder));
		adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
	}

	tmp = intel_de_read(display, TRANS_HSYNC(display, cpu_transcoder));
	adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;

	tmp = intel_de_read(display, TRANS_VTOTAL(display, cpu_transcoder));
	adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;

	/* FIXME TGL+ DSI transcoders have this! */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(display,
				    TRANS_VBLANK(display, cpu_transcoder));
		adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
	}
	tmp = intel_de_read(display, TRANS_VSYNC(display, cpu_transcoder));
	adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;

	if (intel_pipe_is_interlaced(pipe_config)) {
		adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
		/* undo the halfline adjustment applied on the programming side */
		adjusted_mode->crtc_vtotal += 1;
		adjusted_mode->crtc_vblank_end += 1;
	}

	if (DISPLAY_VER(display) >= 13 && !transcoder_is_dsi(cpu_transcoder)) {
		pipe_config->set_context_latency =
			intel_de_read(display,
				      TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder));
		/* reconstruct the delayed vblank start from vactive + SCL */
		adjusted_mode->crtc_vblank_start =
			adjusted_mode->crtc_vdisplay +
			pipe_config->set_context_latency;
	} else if (DISPLAY_VER(display) == 12) {
		/*
		 * TGL doesn't have a dedicated register for SCL.
		 * Instead, the hardware derives SCL from the difference between
		 * TRANS_VBLANK.vblank_start and TRANS_VTOTAL.vactive.
		 * To reflect the HW behaviour, readout the value for SCL as
		 * Vblank start - Vactive.
		 */
		pipe_config->set_context_latency =
			adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
	}

	if (DISPLAY_VER(display) >= 30)
		pipe_config->min_hblank = intel_de_read(display,
							DP_MIN_HBLANK_CTL(cpu_transcoder));
}

/* Translate pipe_src to this pipe's horizontal slice of the joined area. */
static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int num_pipes = intel_crtc_num_joined_pipes(crtc_state);
	enum pipe primary_pipe, pipe = crtc->pipe;
	int width;

	if (num_pipes == 1)
		return;

	primary_pipe = joiner_primary_pipe(crtc_state);
	width = drm_rect_width(&crtc_state->pipe_src);

	drm_rect_translate_to(&crtc_state->pipe_src,
			      (pipe - primary_pipe) * width, 0);
}

/* Read the pipe source size back from PIPESRC into pipe_src. */
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(crtc);
	u32 tmp;

	tmp = intel_de_read(display, PIPESRC(display, crtc->pipe));

	drm_rect_init(&pipe_config->pipe_src, 0, 0,
		      REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
		      REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);

	intel_joiner_adjust_pipe_src(pipe_config);
}

/* Build and write the TRANSCONF value for GMCH platforms. */
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - We keep both pipes enabled on 830
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (display->platform.i830 || !intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	if (crtc_state->double_wide)
		val |= TRANSCONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (display->platform.g4x || display->platform.valleyview ||
	    display->platform.cherryview) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			val |= TRANSCONF_DITHER_EN |
				TRANSCONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			MISSING_CASE(crtc_state->pipe_bpp);
			fallthrough;
		case 18:
			val |= TRANSCONF_BPC_6;
			break;
		case 24:
			val |= TRANSCONF_BPC_8;
			break;
		case 30:
			val |= TRANSCONF_BPC_10;
			break;
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (DISPLAY_VER(display) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
		else
			val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		val |= TRANSCONF_INTERLACE_PROGRESSIVE;
	}

	if ((display->platform.valleyview || display->platform.cherryview) &&
	    crtc_state->limited_color_range)
		val |= TRANSCONF_COLOR_RANGE_SELECT;

	val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);

	if (crtc_state->wgc_enable)
		val |= TRANSCONF_WGC_ENABLE;

	val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);

	intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
	intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
}

/* Read the output colorspace format back from PIPE_MISC (BDW+). */
static enum intel_output_format
bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	u32 tmp;

	tmp = intel_de_read(display, PIPE_MISC(crtc->pipe));

	if (tmp & PIPE_MISC_YUV420_ENABLE) {
		/*
		 * We support 4:2:0 in full blend mode only.
		 * For xe3_lpd+ this is implied in YUV420 Enable bit.
		 * Ensure the same for prior platforms in YUV420 Mode bit.
3019 */ 3020 if (DISPLAY_VER(display) < 30) 3021 drm_WARN_ON(display->drm, 3022 (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0); 3023 3024 return INTEL_OUTPUT_FORMAT_YCBCR420; 3025 } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) { 3026 return INTEL_OUTPUT_FORMAT_YCBCR444; 3027 } else { 3028 return INTEL_OUTPUT_FORMAT_RGB; 3029 } 3030 } 3031 3032 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 3033 struct intel_crtc_state *pipe_config) 3034 { 3035 struct intel_display *display = to_intel_display(crtc); 3036 enum intel_display_power_domain power_domain; 3037 enum transcoder cpu_transcoder = (enum transcoder)crtc->pipe; 3038 intel_wakeref_t wakeref; 3039 bool ret = false; 3040 u32 tmp; 3041 3042 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 3043 wakeref = intel_display_power_get_if_enabled(display, power_domain); 3044 if (!wakeref) 3045 return false; 3046 3047 tmp = intel_de_read(display, TRANSCONF(display, cpu_transcoder)); 3048 if (!(tmp & TRANSCONF_ENABLE)) 3049 goto out; 3050 3051 pipe_config->cpu_transcoder = cpu_transcoder; 3052 3053 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3054 pipe_config->sink_format = pipe_config->output_format; 3055 3056 if (display->platform.g4x || display->platform.valleyview || 3057 display->platform.cherryview) { 3058 switch (tmp & TRANSCONF_BPC_MASK) { 3059 case TRANSCONF_BPC_6: 3060 pipe_config->pipe_bpp = 18; 3061 break; 3062 case TRANSCONF_BPC_8: 3063 pipe_config->pipe_bpp = 24; 3064 break; 3065 case TRANSCONF_BPC_10: 3066 pipe_config->pipe_bpp = 30; 3067 break; 3068 default: 3069 MISSING_CASE(tmp); 3070 break; 3071 } 3072 } 3073 3074 if ((display->platform.valleyview || display->platform.cherryview) && 3075 (tmp & TRANSCONF_COLOR_RANGE_SELECT)) 3076 pipe_config->limited_color_range = true; 3077 3078 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp); 3079 3080 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 3081 3082 if 
((display->platform.valleyview || display->platform.cherryview) && 3083 (tmp & TRANSCONF_WGC_ENABLE)) 3084 pipe_config->wgc_enable = true; 3085 3086 intel_color_get_config(pipe_config); 3087 3088 if (HAS_DOUBLE_WIDE(display)) 3089 pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE; 3090 3091 intel_get_transcoder_timings(crtc, pipe_config); 3092 intel_get_pipe_src_size(crtc, pipe_config); 3093 3094 i9xx_pfit_get_config(pipe_config); 3095 3096 i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state); 3097 3098 if (DISPLAY_VER(display) >= 4) { 3099 tmp = pipe_config->dpll_hw_state.i9xx.dpll_md; 3100 pipe_config->pixel_multiplier = 3101 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 3102 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 3103 } else if (display->platform.i945g || display->platform.i945gm || 3104 display->platform.g33 || display->platform.pineview) { 3105 tmp = pipe_config->dpll_hw_state.i9xx.dpll; 3106 pipe_config->pixel_multiplier = 3107 ((tmp & SDVO_MULTIPLIER_MASK) 3108 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 3109 } else { 3110 /* Note that on i915G/GM the pixel multiplier is in the sdvo 3111 * port and will be fixed up in the encoder->get_config 3112 * function. */ 3113 pipe_config->pixel_multiplier = 1; 3114 } 3115 3116 if (display->platform.cherryview) 3117 chv_crtc_clock_get(pipe_config); 3118 else if (display->platform.valleyview) 3119 vlv_crtc_clock_get(pipe_config); 3120 else 3121 i9xx_crtc_clock_get(pipe_config); 3122 3123 /* 3124 * Normally the dotclock is filled in by the encoder .get_config() 3125 * but in case the pipe is enabled w/o any ports we need a sane 3126 * default. 
3127 */ 3128 pipe_config->hw.adjusted_mode.crtc_clock = 3129 pipe_config->port_clock / pipe_config->pixel_multiplier; 3130 3131 ret = true; 3132 3133 out: 3134 intel_display_power_put(display, power_domain, wakeref); 3135 3136 return ret; 3137 } 3138 3139 void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) 3140 { 3141 struct intel_display *display = to_intel_display(crtc_state); 3142 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3143 u32 val = 0; 3144 3145 /* 3146 * - During modeset the pipe is still disabled and must remain so 3147 * - During fastset the pipe is already enabled and must remain so 3148 */ 3149 if (!intel_crtc_needs_modeset(crtc_state)) 3150 val |= TRANSCONF_ENABLE; 3151 3152 switch (crtc_state->pipe_bpp) { 3153 default: 3154 /* Case prevented by intel_choose_pipe_bpp_dither. */ 3155 MISSING_CASE(crtc_state->pipe_bpp); 3156 fallthrough; 3157 case 18: 3158 val |= TRANSCONF_BPC_6; 3159 break; 3160 case 24: 3161 val |= TRANSCONF_BPC_8; 3162 break; 3163 case 30: 3164 val |= TRANSCONF_BPC_10; 3165 break; 3166 case 36: 3167 val |= TRANSCONF_BPC_12; 3168 break; 3169 } 3170 3171 if (crtc_state->dither) 3172 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3173 3174 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3175 val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3176 else 3177 val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3178 3179 /* 3180 * This would end up with an odd purple hue over 3181 * the entire display. Make sure we don't do it. 
*/
	drm_WARN_ON(display->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/*
	 * NOTE(review): limited range is deliberately not set for SDVO
	 * outputs here — presumably the encoder handles range conversion
	 * itself; confirm against the SDVO encoder code.
	 */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= TRANSCONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709;

	val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* Both delay fields are programmed as N-1 / N in hardware. */
	val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
	val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);

	intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
	intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
}

/*
 * Program TRANSCONF for HSW+ platforms. Compared to the ILK variant most
 * per-pipe properties (bpc, colorspace, delays) have moved to other
 * registers; only enable, dithering (HSW only), interlace mode
 * (pre-version-35 only) and the HSW YUV colorspace bit remain here.
 */
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (!intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	/* Dither control lives in TRANSCONF only on HSW itself. */
	if (display->platform.haswell && crtc_state->dither)
		val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;

	/*
	 * NOTE(review): the interlace field apparently no longer exists
	 * from display version 35 onwards — cutoff taken from the code,
	 * confirm against bspec.
	 */
	if (DISPLAY_VER(display) < 35) {
		if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
			val |= TRANSCONF_INTERLACE_IF_ID_ILK;
		else
			val |= TRANSCONF_INTERLACE_PF_PD_ILK;
	}

	/* HSW-only colorspace select; later platforms use PIPE_MISC. */
	if (display->platform.haswell &&
	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;

	intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
	/* Posting read to make sure the write has landed. */
	intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
}

static void bdw_set_pipe_misc(struct intel_dsb *dsb,
			      const struct intel_crtc_state
*crtc_state) 3235 { 3236 struct intel_display *display = to_intel_display(crtc_state); 3237 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3238 u32 val = 0; 3239 3240 switch (crtc_state->pipe_bpp) { 3241 case 18: 3242 val |= PIPE_MISC_BPC_6; 3243 break; 3244 case 24: 3245 val |= PIPE_MISC_BPC_8; 3246 break; 3247 case 30: 3248 val |= PIPE_MISC_BPC_10; 3249 break; 3250 case 36: 3251 /* Port output 12BPC defined for ADLP+ */ 3252 if (DISPLAY_VER(display) >= 13) 3253 val |= PIPE_MISC_BPC_12_ADLP; 3254 break; 3255 default: 3256 MISSING_CASE(crtc_state->pipe_bpp); 3257 break; 3258 } 3259 3260 if (crtc_state->dither) 3261 val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP; 3262 3263 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 3264 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 3265 val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV; 3266 3267 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 3268 val |= DISPLAY_VER(display) >= 30 ? PIPE_MISC_YUV420_ENABLE : 3269 PIPE_MISC_YUV420_ENABLE | PIPE_MISC_YUV420_MODE_FULL_BLEND; 3270 3271 if (DISPLAY_VER(display) >= 11 && is_hdr_mode(crtc_state)) 3272 val |= PIPE_MISC_HDR_MODE_PRECISION; 3273 3274 if (DISPLAY_VER(display) >= 12) 3275 val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC; 3276 3277 /* allow PSR with sprite enabled */ 3278 if (display->platform.broadwell) 3279 val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE; 3280 3281 intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val); 3282 } 3283 3284 int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) 3285 { 3286 struct intel_display *display = to_intel_display(crtc); 3287 u32 tmp; 3288 3289 tmp = intel_de_read(display, PIPE_MISC(crtc->pipe)); 3290 3291 switch (tmp & PIPE_MISC_BPC_MASK) { 3292 case PIPE_MISC_BPC_6: 3293 return 18; 3294 case PIPE_MISC_BPC_8: 3295 return 24; 3296 case PIPE_MISC_BPC_10: 3297 return 30; 3298 /* 3299 * PORT OUTPUT 12 BPC defined for ADLP+. 
*
	 * TODO:
	 * For previous platforms with DSI interface, bits 5:7
	 * are used for storing pipe_bpp irrespective of dithering.
	 * Since the value of 12 BPC is not defined for these bits
	 * on older platforms, need to find a workaround for 12 BPC
	 * MIPI DSI HW readout.
	 */
	case PIPE_MISC_BPC_12_ADLP:
		if (DISPLAY_VER(display) >= 13)
			return 36;
		fallthrough;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}

/*
 * Return the minimum number of lanes needed to carry target_clock * bpp
 * bits of pixel data over a link running at link_bw, with 5% headroom
 * (21/20) to account for spread spectrum; each lane carries
 * link_bw * 8 bits.
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

/*
 * Read out a data/link M/N divider set from the given registers.
 * All four M/N values share the DATA_LINK_M_N_MASK field; the TU size
 * is stored alongside data M (read back as N-1, hence the +1).
 */
void intel_get_m_n(struct intel_display *display,
		   struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	m_n->link_m = intel_de_read(display, link_m_reg) & DATA_LINK_M_N_MASK;
	m_n->link_n = intel_de_read(display, link_n_reg) & DATA_LINK_M_N_MASK;
	m_n->data_m = intel_de_read(display, data_m_reg) & DATA_LINK_M_N_MASK;
	m_n->data_n = intel_de_read(display, data_n_reg) & DATA_LINK_M_N_MASK;
	m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(display, data_m_reg)) + 1;
}

/*
 * Read out the M1/N1 divider set for a CPU transcoder.
 * ILK+ (display version >= 5) has per-transcoder registers;
 * older g4x-style hardware indexes the registers by pipe instead.
 */
void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    struct intel_link_m_n *m_n)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(display) >= 5)
		intel_get_m_n(display, m_n,
			      PIPE_DATA_M1(display, transcoder),
			      PIPE_DATA_N1(display, transcoder),
			      PIPE_LINK_M1(display, transcoder),
			      PIPE_LINK_N1(display, transcoder));
	else
		intel_get_m_n(display, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); 3358 } 3359 3360 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, 3361 enum transcoder transcoder, 3362 struct intel_link_m_n *m_n) 3363 { 3364 struct intel_display *display = to_intel_display(crtc); 3365 3366 if (!intel_cpu_transcoder_has_m2_n2(display, transcoder)) 3367 return; 3368 3369 intel_get_m_n(display, m_n, 3370 PIPE_DATA_M2(display, transcoder), 3371 PIPE_DATA_N2(display, transcoder), 3372 PIPE_LINK_M2(display, transcoder), 3373 PIPE_LINK_N2(display, transcoder)); 3374 } 3375 3376 static bool ilk_get_pipe_config(struct intel_crtc *crtc, 3377 struct intel_crtc_state *pipe_config) 3378 { 3379 struct intel_display *display = to_intel_display(crtc); 3380 enum intel_display_power_domain power_domain; 3381 enum transcoder cpu_transcoder = (enum transcoder)crtc->pipe; 3382 intel_wakeref_t wakeref; 3383 bool ret = false; 3384 u32 tmp; 3385 3386 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 3387 wakeref = intel_display_power_get_if_enabled(display, power_domain); 3388 if (!wakeref) 3389 return false; 3390 3391 tmp = intel_de_read(display, TRANSCONF(display, cpu_transcoder)); 3392 if (!(tmp & TRANSCONF_ENABLE)) 3393 goto out; 3394 3395 pipe_config->cpu_transcoder = cpu_transcoder; 3396 3397 switch (tmp & TRANSCONF_BPC_MASK) { 3398 case TRANSCONF_BPC_6: 3399 pipe_config->pipe_bpp = 18; 3400 break; 3401 case TRANSCONF_BPC_8: 3402 pipe_config->pipe_bpp = 24; 3403 break; 3404 case TRANSCONF_BPC_10: 3405 pipe_config->pipe_bpp = 30; 3406 break; 3407 case TRANSCONF_BPC_12: 3408 pipe_config->pipe_bpp = 36; 3409 break; 3410 default: 3411 break; 3412 } 3413 3414 if (tmp & TRANSCONF_COLOR_RANGE_SELECT) 3415 pipe_config->limited_color_range = true; 3416 3417 switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) { 3418 case TRANSCONF_OUTPUT_COLORSPACE_YUV601: 3419 case TRANSCONF_OUTPUT_COLORSPACE_YUV709: 3420 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 3421 break; 3422 default: 3423 pipe_config->output_format 
= INTEL_OUTPUT_FORMAT_RGB; 3424 break; 3425 } 3426 3427 pipe_config->sink_format = pipe_config->output_format; 3428 3429 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp); 3430 3431 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 3432 3433 pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp); 3434 3435 intel_color_get_config(pipe_config); 3436 3437 pipe_config->pixel_multiplier = 1; 3438 3439 ilk_pch_get_config(pipe_config); 3440 3441 intel_get_transcoder_timings(crtc, pipe_config); 3442 intel_get_pipe_src_size(crtc, pipe_config); 3443 3444 ilk_pfit_get_config(pipe_config); 3445 3446 ret = true; 3447 3448 out: 3449 intel_display_power_put(display, power_domain, wakeref); 3450 3451 return ret; 3452 } 3453 3454 static u8 joiner_pipes(struct intel_display *display) 3455 { 3456 u8 pipes; 3457 3458 if (DISPLAY_VER(display) >= 12) 3459 pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); 3460 else if (DISPLAY_VER(display) >= 11) 3461 pipes = BIT(PIPE_B) | BIT(PIPE_C); 3462 else 3463 pipes = 0; 3464 3465 return pipes & DISPLAY_RUNTIME_INFO(display)->pipe_mask; 3466 } 3467 3468 static bool transcoder_ddi_func_is_enabled(struct intel_display *display, 3469 enum transcoder cpu_transcoder) 3470 { 3471 enum intel_display_power_domain power_domain; 3472 intel_wakeref_t wakeref; 3473 u32 tmp = 0; 3474 3475 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3476 3477 with_intel_display_power_if_enabled(display, power_domain, wakeref) 3478 tmp = intel_de_read(display, 3479 TRANS_DDI_FUNC_CTL(display, cpu_transcoder)); 3480 3481 return tmp & TRANS_DDI_FUNC_ENABLE; 3482 } 3483 3484 static void enabled_uncompressed_joiner_pipes(struct intel_display *display, 3485 u8 *primary_pipes, u8 *secondary_pipes) 3486 { 3487 struct intel_crtc *crtc; 3488 3489 *primary_pipes = 0; 3490 *secondary_pipes = 0; 3491 3492 if (!HAS_UNCOMPRESSED_JOINER(display)) 3493 return; 3494 3495 
for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
					 joiner_pipes(display)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		/* Only read pipes whose power domain is currently enabled. */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		with_intel_display_power_if_enabled(display, power_domain, wakeref) {
			u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));

			if (tmp & UNCOMPRESSED_JOINER_PRIMARY)
				*primary_pipes |= BIT(pipe);
			if (tmp & UNCOMPRESSED_JOINER_SECONDARY)
				*secondary_pipes |= BIT(pipe);
		}
	}
}

/*
 * Collect bitmasks of pipes currently acting as bigjoiner primary and
 * secondary, by reading ICL_PIPE_DSS_CTL1 for every potentially joinable
 * pipe. Pipes whose DSC power domain is off are skipped (they contribute
 * nothing to either mask).
 */
static void enabled_bigjoiner_pipes(struct intel_display *display,
				    u8 *primary_pipes, u8 *secondary_pipes)
{
	struct intel_crtc *crtc;

	*primary_pipes = 0;
	*secondary_pipes = 0;

	if (!HAS_BIGJOINER(display))
		return;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
					 joiner_pipes(display)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
		with_intel_display_power_if_enabled(display, power_domain, wakeref) {
			u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));

			if (!(tmp & BIG_JOINER_ENABLE))
				continue;

			/* A joined pipe is either the primary or a secondary. */
			if (tmp & PRIMARY_BIG_JOINER_ENABLE)
				*primary_pipes |= BIT(pipe);
			else
				*secondary_pipes |= BIT(pipe);
		}
	}
}

/*
 * Given a mask of primary pipes, compute the mask of pipes expected to
 * act as secondaries: for an num_pipes-wide joiner, each primary's
 * secondaries are the next num_pipes-1 pipe bits above it.
 */
static u8 expected_secondary_pipes(u8 primary_pipes, int num_pipes)
{
	u8 secondary_pipes = 0;

	for (int i = 1; i < num_pipes; i++)
		secondary_pipes |= primary_pipes << i;

	return secondary_pipes;
}

/* Uncompressed joiner always spans exactly two pipes. */
static u8 expected_uncompressed_joiner_secondary_pipes(u8 uncompjoiner_primary_pipes)
{
	return expected_secondary_pipes(uncompjoiner_primary_pipes, 2);
}

/* Bigjoiner always spans exactly two pipes. */
static u8 expected_bigjoiner_secondary_pipes(u8 bigjoiner_primary_pipes)
{
	return
expected_secondary_pipes(bigjoiner_primary_pipes, 2);
}

/*
 * Of the primary pipes at or below @pipe, return (as a single-bit mask)
 * the closest one, i.e. the highest set bit of primary_pipes within
 * GENMASK(pipe, 0); 0 if there is none.
 */
static u8 get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes)
{
	primary_pipes &= GENMASK(pipe, 0);

	return primary_pipes ? BIT(fls(primary_pipes) - 1) : 0;
}

/* Ultrajoiner spans four pipes: one primary plus three secondaries. */
static u8 expected_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes)
{
	return expected_secondary_pipes(ultrajoiner_primary_pipes, 4);
}

/*
 * Add the 4th (primary + 3) pipe of each ultrajoiner set to the
 * secondary mask. Per the comment in enabled_joiner_pipes(), hardware
 * leaves the ultrajoiner enable bit unset on the last pipe of the set
 * of four, so the readout misses it; set it here to make life easier.
 */
static u8 fixup_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes,
					    u8 ultrajoiner_secondary_pipes)
{
	return ultrajoiner_secondary_pipes | ultrajoiner_primary_pipes << 3;
}

/*
 * Collect bitmasks of pipes currently acting as ultrajoiner primary and
 * secondary, by reading ICL_PIPE_DSS_CTL1 for every potentially joinable
 * pipe. Pipes whose DSC power domain is off are skipped.
 */
static void enabled_ultrajoiner_pipes(struct intel_display *display,
				      u8 *primary_pipes, u8 *secondary_pipes)
{
	struct intel_crtc *crtc;

	*primary_pipes = 0;
	*secondary_pipes = 0;

	if (!HAS_ULTRAJOINER(display))
		return;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
					 joiner_pipes(display)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
		with_intel_display_power_if_enabled(display, power_domain, wakeref) {
			u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));

			if (!(tmp & ULTRA_JOINER_ENABLE))
				continue;

			/* A joined pipe is either the primary or a secondary. */
			if (tmp & PRIMARY_ULTRA_JOINER_ENABLE)
				*primary_pipes |= BIT(pipe);
			else
				*secondary_pipes |= BIT(pipe);
		}
	}
}

/*
 * Determine, for the given @pipe, which pipe (if any) is the joiner
 * primary and which pipes are its secondaries, checking ultrajoiner,
 * uncompressed joiner and bigjoiner state in that order and
 * cross-validating the readout along the way.
 */
static void enabled_joiner_pipes(struct intel_display *display,
				 enum pipe pipe,
				 u8 *primary_pipe, u8 *secondary_pipes)
{
	u8 primary_ultrajoiner_pipes;
	u8 primary_uncompressed_joiner_pipes, primary_bigjoiner_pipes;
	u8 secondary_ultrajoiner_pipes;
	u8 secondary_uncompressed_joiner_pipes, secondary_bigjoiner_pipes;
	u8 ultrajoiner_pipes;
	u8 uncompressed_joiner_pipes, bigjoiner_pipes;

enabled_ultrajoiner_pipes(display, &primary_ultrajoiner_pipes, 3627 &secondary_ultrajoiner_pipes); 3628 /* 3629 * For some strange reason the last pipe in the set of four 3630 * shouldn't have ultrajoiner enable bit set in hardware. 3631 * Set the bit anyway to make life easier. 3632 */ 3633 drm_WARN_ON(display->drm, 3634 expected_secondary_pipes(primary_ultrajoiner_pipes, 3) != 3635 secondary_ultrajoiner_pipes); 3636 secondary_ultrajoiner_pipes = 3637 fixup_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes, 3638 secondary_ultrajoiner_pipes); 3639 3640 drm_WARN_ON(display->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0); 3641 3642 enabled_uncompressed_joiner_pipes(display, &primary_uncompressed_joiner_pipes, 3643 &secondary_uncompressed_joiner_pipes); 3644 3645 drm_WARN_ON(display->drm, 3646 (primary_uncompressed_joiner_pipes & secondary_uncompressed_joiner_pipes) != 0); 3647 3648 enabled_bigjoiner_pipes(display, &primary_bigjoiner_pipes, 3649 &secondary_bigjoiner_pipes); 3650 3651 drm_WARN_ON(display->drm, 3652 (primary_bigjoiner_pipes & secondary_bigjoiner_pipes) != 0); 3653 3654 ultrajoiner_pipes = primary_ultrajoiner_pipes | secondary_ultrajoiner_pipes; 3655 uncompressed_joiner_pipes = primary_uncompressed_joiner_pipes | 3656 secondary_uncompressed_joiner_pipes; 3657 bigjoiner_pipes = primary_bigjoiner_pipes | secondary_bigjoiner_pipes; 3658 3659 drm_WARN(display->drm, (ultrajoiner_pipes & bigjoiner_pipes) != ultrajoiner_pipes, 3660 "Ultrajoiner pipes(%#x) should be bigjoiner pipes(%#x)\n", 3661 ultrajoiner_pipes, bigjoiner_pipes); 3662 3663 drm_WARN(display->drm, secondary_ultrajoiner_pipes != 3664 expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes), 3665 "Wrong secondary ultrajoiner pipes(expected %#x, current %#x)\n", 3666 expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes), 3667 secondary_ultrajoiner_pipes); 3668 3669 drm_WARN(display->drm, (uncompressed_joiner_pipes & bigjoiner_pipes) != 0, 3670 
"Uncompressed joiner pipes(%#x) and bigjoiner pipes(%#x) can't intersect\n", 3671 uncompressed_joiner_pipes, bigjoiner_pipes); 3672 3673 drm_WARN(display->drm, secondary_bigjoiner_pipes != 3674 expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes), 3675 "Wrong secondary bigjoiner pipes(expected %#x, current %#x)\n", 3676 expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes), 3677 secondary_bigjoiner_pipes); 3678 3679 drm_WARN(display->drm, secondary_uncompressed_joiner_pipes != 3680 expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes), 3681 "Wrong secondary uncompressed joiner pipes(expected %#x, current %#x)\n", 3682 expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes), 3683 secondary_uncompressed_joiner_pipes); 3684 3685 *primary_pipe = 0; 3686 *secondary_pipes = 0; 3687 3688 if (ultrajoiner_pipes & BIT(pipe)) { 3689 *primary_pipe = get_joiner_primary_pipe(pipe, primary_ultrajoiner_pipes); 3690 *secondary_pipes = secondary_ultrajoiner_pipes & 3691 expected_ultrajoiner_secondary_pipes(*primary_pipe); 3692 3693 drm_WARN(display->drm, 3694 expected_ultrajoiner_secondary_pipes(*primary_pipe) != 3695 *secondary_pipes, 3696 "Wrong ultrajoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", 3697 *primary_pipe, 3698 expected_ultrajoiner_secondary_pipes(*primary_pipe), 3699 *secondary_pipes); 3700 return; 3701 } 3702 3703 if (uncompressed_joiner_pipes & BIT(pipe)) { 3704 *primary_pipe = get_joiner_primary_pipe(pipe, primary_uncompressed_joiner_pipes); 3705 *secondary_pipes = secondary_uncompressed_joiner_pipes & 3706 expected_uncompressed_joiner_secondary_pipes(*primary_pipe); 3707 3708 drm_WARN(display->drm, 3709 expected_uncompressed_joiner_secondary_pipes(*primary_pipe) != 3710 *secondary_pipes, 3711 "Wrong uncompressed joiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", 3712 *primary_pipe, 3713 expected_uncompressed_joiner_secondary_pipes(*primary_pipe), 3714 
*secondary_pipes); 3715 return; 3716 } 3717 3718 if (bigjoiner_pipes & BIT(pipe)) { 3719 *primary_pipe = get_joiner_primary_pipe(pipe, primary_bigjoiner_pipes); 3720 *secondary_pipes = secondary_bigjoiner_pipes & 3721 expected_bigjoiner_secondary_pipes(*primary_pipe); 3722 3723 drm_WARN(display->drm, 3724 expected_bigjoiner_secondary_pipes(*primary_pipe) != 3725 *secondary_pipes, 3726 "Wrong bigjoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", 3727 *primary_pipe, 3728 expected_bigjoiner_secondary_pipes(*primary_pipe), 3729 *secondary_pipes); 3730 return; 3731 } 3732 } 3733 3734 static u8 hsw_panel_transcoders(struct intel_display *display) 3735 { 3736 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP); 3737 3738 if (DISPLAY_VER(display) >= 11) 3739 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 3740 3741 return panel_transcoder_mask; 3742 } 3743 3744 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) 3745 { 3746 struct intel_display *display = to_intel_display(crtc); 3747 u8 panel_transcoder_mask = hsw_panel_transcoders(display); 3748 enum transcoder cpu_transcoder; 3749 u8 primary_pipe, secondary_pipes; 3750 u8 enabled_transcoders = 0; 3751 3752 /* 3753 * XXX: Do intel_display_power_get_if_enabled before reading this (for 3754 * consistency and less surprising code; it's in always on power). 
3755 */ 3756 for_each_cpu_transcoder_masked(display, cpu_transcoder, 3757 panel_transcoder_mask) { 3758 enum intel_display_power_domain power_domain; 3759 intel_wakeref_t wakeref; 3760 enum pipe trans_pipe; 3761 u32 tmp = 0; 3762 3763 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3764 with_intel_display_power_if_enabled(display, power_domain, wakeref) 3765 tmp = intel_de_read(display, 3766 TRANS_DDI_FUNC_CTL(display, cpu_transcoder)); 3767 3768 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 3769 continue; 3770 3771 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 3772 default: 3773 drm_WARN(display->drm, 1, 3774 "unknown pipe linked to transcoder %s\n", 3775 transcoder_name(cpu_transcoder)); 3776 fallthrough; 3777 case TRANS_DDI_EDP_INPUT_A_ONOFF: 3778 case TRANS_DDI_EDP_INPUT_A_ON: 3779 trans_pipe = PIPE_A; 3780 break; 3781 case TRANS_DDI_EDP_INPUT_B_ONOFF: 3782 trans_pipe = PIPE_B; 3783 break; 3784 case TRANS_DDI_EDP_INPUT_C_ONOFF: 3785 trans_pipe = PIPE_C; 3786 break; 3787 case TRANS_DDI_EDP_INPUT_D_ONOFF: 3788 trans_pipe = PIPE_D; 3789 break; 3790 } 3791 3792 if (trans_pipe == crtc->pipe) 3793 enabled_transcoders |= BIT(cpu_transcoder); 3794 } 3795 3796 /* single pipe or joiner primary */ 3797 cpu_transcoder = (enum transcoder) crtc->pipe; 3798 if (transcoder_ddi_func_is_enabled(display, cpu_transcoder)) 3799 enabled_transcoders |= BIT(cpu_transcoder); 3800 3801 /* joiner secondary -> consider the primary pipe's transcoder as well */ 3802 enabled_joiner_pipes(display, crtc->pipe, &primary_pipe, &secondary_pipes); 3803 if (secondary_pipes & BIT(crtc->pipe)) { 3804 cpu_transcoder = (enum transcoder)ffs(primary_pipe) - 1; 3805 if (transcoder_ddi_func_is_enabled(display, cpu_transcoder)) 3806 enabled_transcoders |= BIT(cpu_transcoder); 3807 } 3808 3809 return enabled_transcoders; 3810 } 3811 3812 static bool has_edp_transcoders(u8 enabled_transcoders) 3813 { 3814 return enabled_transcoders & BIT(TRANSCODER_EDP); 3815 } 3816 3817 static bool has_dsi_transcoders(u8 
enabled_transcoders)
{
	return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
				      BIT(TRANSCODER_DSI_1));
}

/*
 * True if the mask contains any "ordinary" per-pipe transcoder,
 * i.e. anything other than the eDP and DSI transcoders.
 */
static bool has_pipe_transcoders(u8 enabled_transcoders)
{
	return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
				       BIT(TRANSCODER_DSI_0) |
				       BIT(TRANSCODER_DSI_1));
}

/*
 * Sanity-check the set of transcoders found enabled for one crtc:
 * they must all be of a single kind (eDP, DSI, or pipe), and only
 * DSI transcoders may be enabled in pairs (ganged); for any other
 * kind the mask must contain exactly one bit.
 */
static void assert_enabled_transcoders(struct intel_display *display,
				       u8 enabled_transcoders)
{
	/* Only one type of transcoder please */
	drm_WARN_ON(display->drm,
		    has_edp_transcoders(enabled_transcoders) +
		    has_dsi_transcoders(enabled_transcoders) +
		    has_pipe_transcoders(enabled_transcoders) > 1);

	/* Only DSI transcoders can be ganged */
	drm_WARN_ON(display->drm,
		    !has_dsi_transcoders(enabled_transcoders) &&
		    !is_power_of_2(enabled_transcoders));
}

/*
 * Read out which CPU transcoder feeds this crtc and whether it is
 * enabled. On success pipe_config->cpu_transcoder is set, the
 * transcoder's power domain reference is added to @power_domain_set,
 * and the return value reflects TRANSCONF_ENABLE. Returns false if no
 * transcoder is enabled for the pipe or its power domain is off.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct intel_display *display = to_intel_display(crtc);
	unsigned long enabled_transcoders;
	u32 tmp;

	enabled_transcoders = hsw_enabled_transcoders(crtc);
	if (!enabled_transcoders)
		return false;

	assert_enabled_transcoders(display, enabled_transcoders);

	/*
	 * With the exception of DSI we should only ever have
	 * a single enabled transcoder. With DSI let's just
	 * pick the first one.
/*
 * Read out the state of the BXT/GLK DSI transcoders. Returns true and fills
 * in @pipe_config->cpu_transcoder if a DSI transcoder is driving @crtc.
 * Any power references taken are recorded in @power_domain_set so the
 * caller can drop them once readout is done.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct intel_display *display = to_intel_display(crtc);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	/* Only ports A and C can carry DSI on these platforms. */
	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		/* Skip ports whose transcoder power well is off. */
		if (!intel_display_power_get_in_set_if_enabled(display, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(display))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(display, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* The port must actually be routed to this pipe. */
		tmp = intel_de_read(display, MIPI_CTRL(display, port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}

/*
 * Read out which pipes are ganged together (joiner) with this CRTC's pipe
 * and record the full mask in @crtc_state->joiner_pipes. Leaves the state
 * untouched if this pipe is not part of a joiner configuration.
 */
static void intel_joiner_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	u8 primary_pipe, secondary_pipes;
	enum pipe pipe = crtc->pipe;

	enabled_joiner_pipes(display, pipe, &primary_pipe, &secondary_pipes);

	/* Nothing to do if this pipe is not part of the joiner config. */
	if (((primary_pipe | secondary_pipes) & BIT(pipe)) == 0)
		return;

	crtc_state->joiner_pipes = primary_pipe | secondary_pipes;
}

/*
 * HSW+ hardware state readout for one pipe. Returns true if the pipe is
 * active, in which case @pipe_config has been filled from the hardware.
 * Power references taken along the way are collected in
 * crtc->hw_readout_power_domains and released before returning.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(crtc);
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(display, &crtc->hw_readout_power_domains,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains);

	/* DSI transcoders are only present on BXT/GLK. */
	if ((display->platform.geminilake || display->platform.broxton) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) {
		/* A pipe can't be driven by both a DDI and a DSI transcoder. */
		drm_WARN_ON(display->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	intel_joiner_get_config(pipe_config);
	intel_dsc_get_config(pipe_config);

	/* intel_vrr_get_config() depends on .framestart_delay */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));

		pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
	} else {
		/* no idea if this is correct */
		pipe_config->framestart_delay = 1;
	}

	/*
	 * intel_vrr_get_config() depends on TRANS_SET_CONTEXT_LATENCY
	 * readout done by intel_get_transcoder_timings().
	 */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(display) >= 11)
		intel_get_transcoder_timings(crtc, pipe_config);

	if (transcoder_has_vrr(pipe_config))
		intel_vrr_get_config(pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	/* HSW encodes the output colorspace in TRANSCONF; BDW+ use PIPE_MISC. */
	if (display->platform.haswell) {
		u32 tmp = intel_de_read(display,
					TRANSCONF(display, pipe_config->cpu_transcoder));

		if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipe_misc_output_format(crtc);
	}

	pipe_config->sink_format = pipe_config->output_format;

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(display, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (display->platform.broadwell || display->platform.haswell)
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* Panel fitter state can only be read with its power well enabled. */
	if (intel_display_power_get_in_set_if_enabled(display, &crtc->hw_readout_power_domains,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(display) >= 9)
			skl_scaler_get_config(pipe_config);
		else
			ilk_pfit_get_config(pipe_config);
	}

	hsw_ips_get_config(pipe_config);

	/* TRANS_MULT doesn't exist for eDP/DSI transcoders. */
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(display,
				      TRANS_MULT(display, pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	intel_display_power_put_all_in_set(display, &crtc->hw_readout_power_domains);

	return active;
}

/*
 * Read out the full hardware state for the CRTC behind @crtc_state via the
 * platform's get_pipe_config() vfunc. Returns false if the pipe is inactive;
 * otherwise marks the state active and derives the remaining software state.
 */
bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!display->funcs.display->get_pipe_config(crtc, crtc_state))
		return false;

	crtc_state->hw.active = true;

	intel_crtc_readout_derived_state(crtc_state);

	return true;
}

/*
 * Convert a link frequency (in 10 kb/s units) and M/N values into a pixel
 * clock. Returns 0 if the N value is zero (link M/N not programmed).
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock -> pixel clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and for link freq (10kbs units) -> pixel clock it is:
	 * link_symbol_clock = link_freq * 10 / link_symbol_size
	 * pixel_clock = (m * link_symbol_clock) / n
	 * or for more precision:
	 * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size)
	 */

	if (!m_n->link_n)
		return 0;

	/* 64-bit multiply to avoid overflow of m * link_freq * 10. */
	return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10),
				m_n->link_n * intel_dp_link_symbol_size(link_freq));
}

/*
 * Derive the CRTC dotclock from the port clock and output configuration
 * (DP M/N values, HDMI deep color, YCbCr 4:2:0, pixel multiplier).
 */
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
{
	int dotclock;

	if (intel_crtc_has_dp_encoder(pipe_config))
		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
						    &pipe_config->dp_m_n);
	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
		/* HDMI deep color: port clock runs faster than the dotclock. */
		dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
					     pipe_config->pipe_bpp);
	else
		dotclock = pipe_config->port_clock;

	/* YCbCr 4:2:0 (non-DP) transmits two pixels per clock. */
	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
	    !intel_crtc_has_dp_encoder(pipe_config))
		dotclock *= 2;

	if (pipe_config->pixel_multiplier)
		dotclock /= pipe_config->pixel_multiplier;

	return dotclock;
}
/*
 * Returns the currently programmed mode of the given encoder, read back from
 * the hardware, or NULL if the encoder is off or allocation/readout fails.
 * The returned mode is kzalloc'd; the caller owns it and must kfree() it.
 */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	struct intel_crtc *crtc;
	enum pipe pipe;

	/* Encoder off -> no current mode. */
	if (!encoder->get_hw_state(encoder, &pipe))
		return NULL;

	crtc = intel_crtc_for_pipe(display, pipe);

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/* Temporary crtc state used purely as a readout scratch area. */
	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(mode);
		return NULL;
	}

	if (!intel_crtc_get_pipe_config(crtc_state)) {
		intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
		kfree(mode);
		return NULL;
	}

	intel_encoder_get_config(encoder, crtc_state);

	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);

	intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);

	return mode;
}

/*
 * Two encoders may share a pipe only if each one declares the other's type
 * in its cloneable mask (or they are the same encoder).
 */
static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & BIT(b->type) &&
			  b->cloneable & BIT(a->type));
}

/*
 * Check that @encoder can be cloned with every other encoder already
 * assigned to @crtc in @state. Returns false on an invalid cloning
 * configuration.
 */
static bool check_single_encoder_cloning(struct intel_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}
/*
 * Line time watermark for HSW+, in 1/8 us units: time to scan out one line
 * at the pipe's dotclock. Returns 0 for a disabled CRTC. Clamped to the
 * 9-bit register field (0x1ff).
 */
static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	/* htotal * 1000 * 8 / clock(kHz) = line time in 1/8 us. */
	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					pipe_mode->crtc_clock);

	return min(linetime_wm, 0x1ff);
}

/*
 * IPS line time watermark: as hsw_linetime_wm() but based on the logical
 * CDCLK rather than the pipe dotclock. Clamped to 0x1ff.
 */
static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
			       const struct intel_cdclk_state *cdclk_state)
{
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					intel_cdclk_logical(cdclk_state));

	return min(linetime_wm, 0x1ff);
}

/*
 * SKL+ line time watermark, based on the adjusted pixel rate (which accounts
 * for pipe scaling), rounded up rather than to closest. Clamped to 0x1ff.
 */
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
				   crtc_state->pixel_rate);

	/* Display WA #1135: BXT:ALL GLK:ALL */
	if ((display->platform.geminilake || display->platform.broxton) &&
	    skl_watermark_ipc_enabled(display))
		linetime_wm /= 2;

	return min(linetime_wm, 0x1ff);
}

/*
 * Compute crtc_state->linetime (and, on CRTCs supporting IPS, also
 * crtc_state->ips_linetime, which requires the global CDCLK state).
 * Returns 0 on success or a negative error code.
 */
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_cdclk_state *cdclk_state;

	if (DISPLAY_VER(display) >= 9)
		crtc_state->linetime = skl_linetime_wm(crtc_state);
	else
		crtc_state->linetime = hsw_linetime_wm(crtc_state);

	if (!hsw_crtc_supports_ips(crtc))
		return 0;

	/* May return -EDEADLK if the CDCLK state lock must be retried. */
	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
						       cdclk_state);

	return 0;
}
/*
 * Per-CRTC atomic check: validates and computes the derived CRTC state
 * (DPLL, color management, watermarks, scalers, IPS, line time, PSR2
 * selective fetch). Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	/* Pre-GEN5 (except g4x): update watermarks after disabling a pipe. */
	if (DISPLAY_VER(display) < 5 && !display->platform.g4x &&
	    intel_crtc_needs_modeset(crtc_state) &&
	    !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	if (intel_crtc_needs_modeset(crtc_state)) {
		ret = intel_dpll_crtc_get_dpll(state, crtc);
		if (ret)
			return ret;
	}

	ret = intel_color_check(state, crtc);
	if (ret)
		return ret;

	ret = intel_wm_compute(state, crtc);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] watermarks are invalid\n",
			    crtc->base.base.id, crtc->base.name);
		return ret;
	}

	ret = intel_casf_compute_config(crtc_state);
	if (ret)
		return ret;

	if (DISPLAY_VER(display) >= 9) {
		/* CRTC scaler state is only recomputed when it can change. */
		if (intel_crtc_needs_modeset(crtc_state) ||
		    intel_crtc_needs_fastset(crtc_state) ||
		    intel_casf_needs_scaler(crtc_state)) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(state, crtc);
		if (ret)
			return ret;
	}

	if (HAS_IPS(display)) {
		ret = hsw_ips_compute_config(state, crtc);
		if (ret)
			return ret;
	}

	if (DISPLAY_VER(display) >= 9 ||
	    display->platform.broadwell || display->platform.haswell) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	ret = intel_psr2_sel_fetch_update(state, crtc);
	if (ret)
		return ret;

	return 0;
}
intel_atomic_setup_scalers(state, crtc); 4294 if (ret) 4295 return ret; 4296 } 4297 4298 if (HAS_IPS(display)) { 4299 ret = hsw_ips_compute_config(state, crtc); 4300 if (ret) 4301 return ret; 4302 } 4303 4304 if (DISPLAY_VER(display) >= 9 || 4305 display->platform.broadwell || display->platform.haswell) { 4306 ret = hsw_compute_linetime_wm(state, crtc); 4307 if (ret) 4308 return ret; 4309 4310 } 4311 4312 ret = intel_psr2_sel_fetch_update(state, crtc); 4313 if (ret) 4314 return ret; 4315 4316 return 0; 4317 } 4318 4319 static int 4320 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 4321 struct intel_crtc_state *crtc_state) 4322 { 4323 struct intel_display *display = to_intel_display(crtc_state); 4324 struct drm_connector *connector = conn_state->connector; 4325 const struct drm_display_info *info = &connector->display_info; 4326 int bpp; 4327 4328 switch (conn_state->max_bpc) { 4329 case 6 ... 7: 4330 bpp = 6 * 3; 4331 break; 4332 case 8 ... 9: 4333 bpp = 8 * 3; 4334 break; 4335 case 10 ... 11: 4336 bpp = 10 * 3; 4337 break; 4338 case 12 ... 
16: 4339 bpp = 12 * 3; 4340 break; 4341 default: 4342 MISSING_CASE(conn_state->max_bpc); 4343 return -EINVAL; 4344 } 4345 4346 if (bpp < crtc_state->pipe_bpp) { 4347 drm_dbg_kms(display->drm, 4348 "[CONNECTOR:%d:%s] Limiting display bpp to %d " 4349 "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n", 4350 connector->base.id, connector->name, 4351 bpp, 3 * info->bpc, 4352 3 * conn_state->max_requested_bpc, 4353 crtc_state->pipe_bpp); 4354 4355 crtc_state->pipe_bpp = bpp; 4356 } 4357 4358 return 0; 4359 } 4360 4361 int intel_display_min_pipe_bpp(void) 4362 { 4363 return 6 * 3; 4364 } 4365 4366 int intel_display_max_pipe_bpp(struct intel_display *display) 4367 { 4368 if (display->platform.g4x || display->platform.valleyview || 4369 display->platform.cherryview) 4370 return 10*3; 4371 else if (DISPLAY_VER(display) >= 5) 4372 return 12*3; 4373 else 4374 return 8*3; 4375 } 4376 4377 static int 4378 compute_baseline_pipe_bpp(struct intel_atomic_state *state, 4379 struct intel_crtc *crtc) 4380 { 4381 struct intel_display *display = to_intel_display(crtc); 4382 struct intel_crtc_state *crtc_state = 4383 intel_atomic_get_new_crtc_state(state, crtc); 4384 struct drm_connector *connector; 4385 struct drm_connector_state *connector_state; 4386 int i; 4387 4388 crtc_state->pipe_bpp = intel_display_max_pipe_bpp(display); 4389 4390 /* Clamp display bpp to connector max bpp */ 4391 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4392 int ret; 4393 4394 if (connector_state->crtc != &crtc->base) 4395 continue; 4396 4397 ret = compute_sink_pipe_bpp(connector_state, crtc_state); 4398 if (ret) 4399 return ret; 4400 } 4401 4402 return 0; 4403 } 4404 4405 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 4406 { 4407 struct intel_display *display = to_intel_display(state); 4408 struct drm_connector *connector; 4409 struct drm_connector_list_iter conn_iter; 4410 unsigned int used_ports = 0; 4411 unsigned int used_mst_ports = 
/*
 * Copy the color management blobs (degamma/gamma LUTs and CTM) from the
 * uapi state into the hw state for a non-modeset update. Must only be
 * called on the joiner primary (or a non-joined CRTC).
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
					   struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* Joiner secondaries get their hw state copied from the primary. */
	WARN_ON(intel_crtc_is_joiner_secondary(crtc_state));

	drm_property_replace_blob(&crtc_state->hw.degamma_lut,
				  crtc_state->uapi.degamma_lut);
	drm_property_replace_blob(&crtc_state->hw.gamma_lut,
				  crtc_state->uapi.gamma_lut);
	drm_property_replace_blob(&crtc_state->hw.ctm,
				  crtc_state->uapi.ctm);
}
/*
 * Full uapi -> hw state copy for a modeset: mode, enable/active flags,
 * scaling filter, plus the color management blobs. Must only be called on
 * the joiner primary (or a non-joined CRTC).
 */
static void
intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	WARN_ON(intel_crtc_is_joiner_secondary(crtc_state));

	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	drm_mode_copy(&crtc_state->hw.mode,
		      &crtc_state->uapi.mode);
	drm_mode_copy(&crtc_state->hw.adjusted_mode,
		      &crtc_state->uapi.adjusted_mode);
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
}

/*
 * Non-modeset joiner sync: copy the primary CRTC's color management hw
 * state (and the color_mgmt_changed flag) onto the secondary, so both
 * halves of the joined pipe stay in lockstep.
 */
static void
copy_joiner_crtc_state_nomodeset(struct intel_atomic_state *state,
				 struct intel_crtc *secondary_crtc)
{
	struct intel_crtc_state *secondary_crtc_state =
		intel_atomic_get_new_crtc_state(state, secondary_crtc);
	struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state);
	const struct intel_crtc_state *primary_crtc_state =
		intel_atomic_get_new_crtc_state(state, primary_crtc);

	drm_property_replace_blob(&secondary_crtc_state->hw.degamma_lut,
				  primary_crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&secondary_crtc_state->hw.gamma_lut,
				  primary_crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&secondary_crtc_state->hw.ctm,
				  primary_crtc_state->hw.ctm);

	secondary_crtc_state->uapi.color_mgmt_changed = primary_crtc_state->uapi.color_mgmt_changed;
}

/*
 * Modeset joiner sync: make the secondary CRTC's state a copy of the
 * primary's, while preserving the secondary's own uapi state, scaler state,
 * DPLL and CRC enablement. Also re-initializes the secondary's hw state
 * from the primary and transfers a DP tunnel reference if one is held.
 * Returns 0 on success or -ENOMEM.
 */
static int
copy_joiner_crtc_state_modeset(struct intel_atomic_state *state,
			       struct intel_crtc *secondary_crtc)
{
	struct intel_crtc_state *secondary_crtc_state =
		intel_atomic_get_new_crtc_state(state, secondary_crtc);
	struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state);
	const struct intel_crtc_state *primary_crtc_state =
		intel_atomic_get_new_crtc_state(state, primary_crtc);
	struct intel_crtc_state *saved_state;

	WARN_ON(primary_crtc_state->joiner_pipes !=
		secondary_crtc_state->joiner_pipes);

	saved_state = kmemdup(primary_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* preserve some things from the secondary's original crtc state */
	saved_state->uapi = secondary_crtc_state->uapi;
	saved_state->scaler_state = secondary_crtc_state->scaler_state;
	saved_state->intel_dpll = secondary_crtc_state->intel_dpll;
	saved_state->crc_enabled = secondary_crtc_state->crc_enabled;

	/* drop refs held by the old secondary state before overwriting it */
	intel_crtc_free_hw_state(secondary_crtc_state);
	if (secondary_crtc_state->dp_tunnel_ref.tunnel)
		drm_dp_tunnel_ref_put(&secondary_crtc_state->dp_tunnel_ref);
	memcpy(secondary_crtc_state, saved_state, sizeof(*secondary_crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&secondary_crtc_state->hw, 0, sizeof(secondary_crtc_state->hw));
	secondary_crtc_state->hw.enable = primary_crtc_state->hw.enable;
	secondary_crtc_state->hw.active = primary_crtc_state->hw.active;
	drm_mode_copy(&secondary_crtc_state->hw.mode,
		      &primary_crtc_state->hw.mode);
	drm_mode_copy(&secondary_crtc_state->hw.pipe_mode,
		      &primary_crtc_state->hw.pipe_mode);
	drm_mode_copy(&secondary_crtc_state->hw.adjusted_mode,
		      &primary_crtc_state->hw.adjusted_mode);
	secondary_crtc_state->hw.scaling_filter = primary_crtc_state->hw.scaling_filter;

	if (primary_crtc_state->dp_tunnel_ref.tunnel)
		drm_dp_tunnel_ref_get(primary_crtc_state->dp_tunnel_ref.tunnel,
				      &secondary_crtc_state->dp_tunnel_ref);

	copy_joiner_crtc_state_nomodeset(state, secondary_crtc);

	secondary_crtc_state->uapi.mode_changed = primary_crtc_state->uapi.mode_changed;
	secondary_crtc_state->uapi.connectors_changed = primary_crtc_state->uapi.connectors_changed;
	secondary_crtc_state->uapi.active_changed = primary_crtc_state->uapi.active_changed;

	WARN_ON(primary_crtc_state->joiner_pipes !=
		secondary_crtc_state->joiner_pipes);

	return 0;
}
/*
 * Reset the new crtc state to freshly-allocated (mostly zeroed) contents
 * before recomputing it, while preserving the fields that must survive
 * across the recompute (uapi state, inherited flag, scaler state, DPLL
 * state, CRC enablement, and pre-SKL watermarks). Finishes by copying the
 * uapi state into the hw state. Returns 0 on success or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->inherited = crtc_state->inherited;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->intel_dpll = crtc_state->intel_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* Pre-SKL watermarks live in crtc_state and must be preserved. */
	if (display->platform.g4x ||
	    display->platform.valleyview || display->platform.cherryview)
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);

	return 0;
}
/*
 * Compute the full pipe configuration for a modeset: baseline bpp, link bw
 * limits, pipe source size, output types, and the encoder/CRTC config hooks.
 * May return -EDEADLK (caller must retry the atomic commit) or another
 * negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  const struct intel_link_bw_limits *limits)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int pipe_src_w, pipe_src_h;
	int base_bpp, ret, i;

	/* Default transcoder is the one matching the pipe. */
	crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;

	crtc_state->framestart_delay = 1;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(state, crtc);
	if (ret)
		return ret;

	/* Apply the per-pipe link bandwidth limits computed by the caller. */
	crtc_state->dsc.compression_enabled_on_link = limits->link_dsc_pipes & BIT(crtc->pipe);
	crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];

	if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n",
			    crtc->base.base.id, crtc->base.name,
			    FXP_Q4_ARGS(crtc_state->max_link_bpp_x16));
		crtc_state->bw_constrained = true;
	}

	base_bpp = crtc_state->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&crtc_state->hw.mode,
			       &pipe_src_w, &pipe_src_h);
	drm_rect_init(&crtc_state->pipe_src, 0, 0,
		      pipe_src_w, pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		if (!check_single_encoder_cloning(state, crtc, encoder)) {
			drm_dbg_kms(display->drm,
				    "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
				    encoder->base.base.id, encoder->base.name);
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			crtc_state->output_types |=
				BIT(encoder->compute_output_type(encoder, crtc_state,
								 connector_state));
		else
			crtc_state->output_types |= BIT(encoder->type);
	}

	/* Ensure the port clock defaults are reset when retrying. */
	crtc_state->port_clock = 0;
	crtc_state->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = encoder->compute_config(encoder, crtc_state,
					      connector_state);
		if (ret == -EDEADLK)
			return ret;
		if (ret < 0) {
			drm_dbg_kms(display->drm, "[ENCODER:%d:%s] config failure: %d\n",
				    encoder->base.base.id, encoder->base.name, ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!crtc_state->port_clock)
		crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
			* crtc_state->pixel_multiplier;

	ret = intel_crtc_compute_config(state, crtc);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] config failure: %d\n",
			    crtc->base.base.id, crtc->base.name, ret);
		return ret;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
		!crtc_state->dither_force_disable;
	drm_dbg_kms(display->drm,
		    "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    crtc->base.base.id, crtc->base.name,
		    base_bpp, crtc_state->pipe_bpp, crtc_state->dither);

	return 0;
}
4766 */ 4767 crtc_state->dither = (crtc_state->pipe_bpp == 6*3) && 4768 !crtc_state->dither_force_disable; 4769 drm_dbg_kms(display->drm, 4770 "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 4771 crtc->base.base.id, crtc->base.name, 4772 base_bpp, crtc_state->pipe_bpp, crtc_state->dither); 4773 4774 return 0; 4775 } 4776 4777 static int 4778 intel_modeset_pipe_config_late(struct intel_atomic_state *state, 4779 struct intel_crtc *crtc) 4780 { 4781 struct intel_crtc_state *crtc_state = 4782 intel_atomic_get_new_crtc_state(state, crtc); 4783 struct drm_connector_state *conn_state; 4784 struct drm_connector *connector; 4785 int i; 4786 4787 for_each_new_connector_in_state(&state->base, connector, 4788 conn_state, i) { 4789 struct intel_encoder *encoder = 4790 to_intel_encoder(conn_state->best_encoder); 4791 int ret; 4792 4793 if (conn_state->crtc != &crtc->base || 4794 !encoder->compute_config_late) 4795 continue; 4796 4797 ret = encoder->compute_config_late(encoder, crtc_state, 4798 conn_state); 4799 if (ret) 4800 return ret; 4801 } 4802 4803 return 0; 4804 } 4805 4806 bool intel_fuzzy_clock_check(int clock1, int clock2) 4807 { 4808 int diff; 4809 4810 if (clock1 == clock2) 4811 return true; 4812 4813 if (!clock1 || !clock2) 4814 return false; 4815 4816 diff = abs(clock1 - clock2); 4817 4818 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 4819 return true; 4820 4821 return false; 4822 } 4823 4824 static bool 4825 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 4826 const struct intel_link_m_n *m2_n2) 4827 { 4828 return m_n->tu == m2_n2->tu && 4829 m_n->data_m == m2_n2->data_m && 4830 m_n->data_n == m2_n2->data_n && 4831 m_n->link_m == m2_n2->link_m && 4832 m_n->link_n == m2_n2->link_n; 4833 } 4834 4835 static bool 4836 intel_compare_infoframe(const union hdmi_infoframe *a, 4837 const union hdmi_infoframe *b) 4838 { 4839 return memcmp(a, b, sizeof(*a)) == 0; 4840 } 4841 4842 static bool 4843 intel_compare_dp_vsc_sdp(const 
/* Equality of the fields of a DP Adaptive-Sync SDP that we program/read out. */
static bool
intel_compare_dp_as_sdp(const struct drm_dp_as_sdp *a,
			const struct drm_dp_as_sdp *b)
{
	return a->vtotal == b->vtotal &&
		a->target_rr == b->target_rr &&
		a->duration_incr_ms == b->duration_incr_ms &&
		a->duration_decr_ms == b->duration_decr_ms &&
		a->mode == b->mode;
}

/* Byte-wise equality of two buffers of length @len. */
static bool
intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
{
	return memcmp(a, b, len) == 0;
}

/*
 * Log a pipe config mismatch for @name on @crtc. For a fastset check this
 * is informational ("requirement not met"); for a full state check it is
 * an error. @format/... provide the mismatch details.
 */
static void __printf(5, 6)
pipe_config_mismatch(struct drm_printer *p, bool fastset,
		     const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_printf(p, "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
			   crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_printf(p, "[CRTC:%d:%s] mismatch in %s %pV\n",
			   crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}

/*
 * Log an infoframe mismatch, dumping both the expected and found frames.
 * hdmi_infoframe_log() takes a raw printk level rather than a drm_printer,
 * so pick the level matching the fastset/error split by hand.
 */
static void
pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
			       const struct intel_crtc *crtc,
			       const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	struct intel_display *display = to_intel_display(crtc);
	const char *loglevel;

	if (fastset) {
		/* Skip the dump entirely if KMS debugging is off. */
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		loglevel = KERN_DEBUG;
	} else {
		loglevel = KERN_ERR;
	}

	pipe_config_mismatch(p, fastset, crtc, name, "infoframe");

	drm_printf(p, "expected:\n");
	hdmi_infoframe_log(loglevel, display->drm->dev, a);
	drm_printf(p, "found:\n");
	hdmi_infoframe_log(loglevel, display->drm->dev, b);
}
/* Log a DP VSC SDP mismatch, dumping expected and found SDPs. */
static void
pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
				const struct intel_crtc *crtc,
				const char *name,
				const struct drm_dp_vsc_sdp *a,
				const struct drm_dp_vsc_sdp *b)
{
	pipe_config_mismatch(p, fastset, crtc, name, "dp vsc sdp");

	drm_printf(p, "expected:\n");
	drm_dp_vsc_sdp_log(p, a);
	drm_printf(p, "found:\n");
	drm_dp_vsc_sdp_log(p, b);
}

/* Log a DP Adaptive-Sync SDP mismatch, dumping expected and found SDPs. */
static void
pipe_config_dp_as_sdp_mismatch(struct drm_printer *p, bool fastset,
			       const struct intel_crtc *crtc,
			       const char *name,
			       const struct drm_dp_as_sdp *a,
			       const struct drm_dp_as_sdp *b)
{
	pipe_config_mismatch(p, fastset, crtc, name, "dp as sdp");

	drm_printf(p, "expected:\n");
	drm_dp_as_sdp_log(p, a);
	drm_printf(p, "found:\n");
	drm_dp_as_sdp_log(p, b);
}

/* Returns the length up to and including the last differing byte */
static size_t
memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
{
	int i;

	/* Scan backwards so we stop at the last difference. */
	for (i = len - 1; i >= 0; i--) {
		if (a[i] != b[i])
			return i + 1;
	}

	return 0;
}

/*
 * Log a buffer mismatch as a hex dump of both buffers, truncated to the
 * last byte that actually differs.
 */
static void
pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset,
			    const struct intel_crtc *crtc,
			    const char *name,
			    const u8 *a, const u8 *b, size_t len)
{
	pipe_config_mismatch(p, fastset, crtc, name, "buffer");

	/* only dump up to the last difference */
	len = memcmp_diff_len(a, b, len);

	drm_print_hex_dump(p, "expected: ", a, len);
	drm_print_hex_dump(p, "found:    ", b, len);
}

/* Log a DPLL hw state mismatch, dumping expected and found states. */
static void
pipe_config_pll_mismatch(struct drm_printer *p, bool fastset,
			 const struct intel_crtc *crtc,
			 const char *name,
			 const struct intel_dpll_hw_state *a,
			 const struct intel_dpll_hw_state *b)
{
	struct intel_display *display = to_intel_display(crtc);

	pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */

	drm_printf(p, "expected:\n");
	intel_dpll_dump_hw_state(display, p, a);
	drm_printf(p, "found:\n");
	intel_dpll_dump_hw_state(display, p, b);
}
struct intel_display *display = to_intel_display(crtc); 4986 4987 pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */ 4988 4989 drm_printf(p, "expected:\n"); 4990 intel_dpll_dump_hw_state(display, p, a); 4991 drm_printf(p, "found:\n"); 4992 intel_dpll_dump_hw_state(display, p, b); 4993 } 4994 4995 static bool allow_vblank_delay_fastset(const struct intel_crtc_state *old_crtc_state) 4996 { 4997 struct intel_display *display = to_intel_display(old_crtc_state); 4998 4999 /* 5000 * Allow fastboot to fix up vblank delay (handled via LRR 5001 * codepaths), a bit dodgy as the registers aren't 5002 * double buffered but seems to be working more or less... 5003 * 5004 * Also allow this when the VRR timing generator is always on, 5005 * and optimized guardband is used. In such cases, 5006 * vblank delay may vary even without inherited state, but it's 5007 * still safe as VRR guardband is still same. 5008 */ 5009 return HAS_LRR(display) && 5010 (old_crtc_state->inherited || intel_vrr_always_use_vrr_tg(display)) && 5011 !intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI); 5012 } 5013 5014 static void 5015 pipe_config_lt_phy_pll_mismatch(struct drm_printer *p, bool fastset, 5016 const struct intel_crtc *crtc, 5017 const char *name, 5018 const struct intel_lt_phy_pll_state *a, 5019 const struct intel_lt_phy_pll_state *b) 5020 { 5021 struct intel_display *display = to_intel_display(crtc); 5022 char *chipname = "LTPHY"; 5023 5024 pipe_config_mismatch(p, fastset, crtc, name, chipname); 5025 5026 drm_printf(p, "expected:\n"); 5027 intel_lt_phy_dump_hw_state(display, a); 5028 drm_printf(p, "found:\n"); 5029 intel_lt_phy_dump_hw_state(display, b); 5030 } 5031 5032 bool 5033 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 5034 const struct intel_crtc_state *pipe_config, 5035 bool fastset) 5036 { 5037 struct intel_display *display = to_intel_display(current_config); 5038 struct intel_crtc *crtc = 
to_intel_crtc(pipe_config->uapi.crtc); 5039 struct drm_printer p; 5040 u32 exclude_infoframes = 0; 5041 bool ret = true; 5042 5043 if (fastset) 5044 p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL); 5045 else 5046 p = drm_err_printer(display->drm, NULL); 5047 5048 #define PIPE_CONF_CHECK_X(name) do { \ 5049 if (current_config->name != pipe_config->name) { \ 5050 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ 5051 __stringify(name) " is bool"); \ 5052 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5053 "(expected 0x%08x, found 0x%08x)", \ 5054 current_config->name, \ 5055 pipe_config->name); \ 5056 ret = false; \ 5057 } \ 5058 } while (0) 5059 5060 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \ 5061 if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \ 5062 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ 5063 __stringify(name) " is bool"); \ 5064 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5065 "(expected 0x%08x, found 0x%08x)", \ 5066 current_config->name & (mask), \ 5067 pipe_config->name & (mask)); \ 5068 ret = false; \ 5069 } \ 5070 } while (0) 5071 5072 #define PIPE_CONF_CHECK_I(name) do { \ 5073 if (current_config->name != pipe_config->name) { \ 5074 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ 5075 __stringify(name) " is bool"); \ 5076 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5077 "(expected %i, found %i)", \ 5078 current_config->name, \ 5079 pipe_config->name); \ 5080 ret = false; \ 5081 } \ 5082 } while (0) 5083 5084 #define PIPE_CONF_CHECK_LLI(name) do { \ 5085 if (current_config->name != pipe_config->name) { \ 5086 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5087 "(expected %lli, found %lli)", \ 5088 current_config->name, \ 5089 pipe_config->name); \ 5090 ret = false; \ 5091 } \ 5092 } while (0) 5093 5094 #define PIPE_CONF_CHECK_BOOL(name) do { \ 5095 if (current_config->name != pipe_config->name) { \ 5096 
BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \ 5097 __stringify(name) " is not bool"); \ 5098 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5099 "(expected %s, found %s)", \ 5100 str_yes_no(current_config->name), \ 5101 str_yes_no(pipe_config->name)); \ 5102 ret = false; \ 5103 } \ 5104 } while (0) 5105 5106 #define PIPE_CONF_CHECK_P(name) do { \ 5107 if (current_config->name != pipe_config->name) { \ 5108 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5109 "(expected %p, found %p)", \ 5110 current_config->name, \ 5111 pipe_config->name); \ 5112 ret = false; \ 5113 } \ 5114 } while (0) 5115 5116 #define PIPE_CONF_CHECK_M_N(name) do { \ 5117 if (!intel_compare_link_m_n(¤t_config->name, \ 5118 &pipe_config->name)) { \ 5119 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5120 "(expected tu %i data %i/%i link %i/%i, " \ 5121 "found tu %i, data %i/%i link %i/%i)", \ 5122 current_config->name.tu, \ 5123 current_config->name.data_m, \ 5124 current_config->name.data_n, \ 5125 current_config->name.link_m, \ 5126 current_config->name.link_n, \ 5127 pipe_config->name.tu, \ 5128 pipe_config->name.data_m, \ 5129 pipe_config->name.data_n, \ 5130 pipe_config->name.link_m, \ 5131 pipe_config->name.link_n); \ 5132 ret = false; \ 5133 } \ 5134 } while (0) 5135 5136 #define PIPE_CONF_CHECK_PLL(name) do { \ 5137 if (!intel_dpll_compare_hw_state(display, ¤t_config->name, \ 5138 &pipe_config->name)) { \ 5139 pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \ 5140 ¤t_config->name, \ 5141 &pipe_config->name); \ 5142 ret = false; \ 5143 } \ 5144 } while (0) 5145 5146 #define PIPE_CONF_CHECK_PLL_LT(name) do { \ 5147 if (!intel_lt_phy_pll_compare_hw_state(¤t_config->name, \ 5148 &pipe_config->name)) { \ 5149 pipe_config_lt_phy_pll_mismatch(&p, fastset, crtc, __stringify(name), \ 5150 ¤t_config->name, \ 5151 &pipe_config->name); \ 5152 ret = false; \ 5153 } \ 5154 } while (0) 5155 5156 #define PIPE_CONF_CHECK_TIMINGS(name) 
do { \ 5157 PIPE_CONF_CHECK_I(name.crtc_hdisplay); \ 5158 PIPE_CONF_CHECK_I(name.crtc_htotal); \ 5159 PIPE_CONF_CHECK_I(name.crtc_hblank_start); \ 5160 PIPE_CONF_CHECK_I(name.crtc_hblank_end); \ 5161 PIPE_CONF_CHECK_I(name.crtc_hsync_start); \ 5162 PIPE_CONF_CHECK_I(name.crtc_hsync_end); \ 5163 PIPE_CONF_CHECK_I(name.crtc_vdisplay); \ 5164 if (!fastset || !allow_vblank_delay_fastset(current_config)) \ 5165 PIPE_CONF_CHECK_I(name.crtc_vblank_start); \ 5166 PIPE_CONF_CHECK_I(name.crtc_vsync_start); \ 5167 PIPE_CONF_CHECK_I(name.crtc_vsync_end); \ 5168 if (!fastset || !pipe_config->update_lrr) { \ 5169 PIPE_CONF_CHECK_I(name.crtc_vtotal); \ 5170 PIPE_CONF_CHECK_I(name.crtc_vblank_end); \ 5171 } \ 5172 } while (0) 5173 5174 #define PIPE_CONF_CHECK_RECT(name) do { \ 5175 PIPE_CONF_CHECK_I(name.x1); \ 5176 PIPE_CONF_CHECK_I(name.x2); \ 5177 PIPE_CONF_CHECK_I(name.y1); \ 5178 PIPE_CONF_CHECK_I(name.y2); \ 5179 } while (0) 5180 5181 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ 5182 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 5183 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ 5184 "(%x) (expected %i, found %i)", \ 5185 (mask), \ 5186 current_config->name & (mask), \ 5187 pipe_config->name & (mask)); \ 5188 ret = false; \ 5189 } \ 5190 } while (0) 5191 5192 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ 5193 if (!intel_compare_infoframe(¤t_config->infoframes.name, \ 5194 &pipe_config->infoframes.name)) { \ 5195 pipe_config_infoframe_mismatch(&p, fastset, crtc, __stringify(name), \ 5196 ¤t_config->infoframes.name, \ 5197 &pipe_config->infoframes.name); \ 5198 ret = false; \ 5199 } \ 5200 } while (0) 5201 5202 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \ 5203 if (!intel_compare_dp_vsc_sdp(¤t_config->infoframes.name, \ 5204 &pipe_config->infoframes.name)) { \ 5205 pipe_config_dp_vsc_sdp_mismatch(&p, fastset, crtc, __stringify(name), \ 5206 ¤t_config->infoframes.name, \ 5207 &pipe_config->infoframes.name); \ 5208 ret = false; \ 5209 } \ 
5210 } while (0) 5211 5212 #define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \ 5213 if (!intel_compare_dp_as_sdp(¤t_config->infoframes.name, \ 5214 &pipe_config->infoframes.name)) { \ 5215 pipe_config_dp_as_sdp_mismatch(&p, fastset, crtc, __stringify(name), \ 5216 ¤t_config->infoframes.name, \ 5217 &pipe_config->infoframes.name); \ 5218 ret = false; \ 5219 } \ 5220 } while (0) 5221 5222 #define PIPE_CONF_CHECK_BUFFER(name, len) do { \ 5223 BUILD_BUG_ON(sizeof(current_config->name) != (len)); \ 5224 BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \ 5225 if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \ 5226 pipe_config_buffer_mismatch(&p, fastset, crtc, __stringify(name), \ 5227 current_config->name, \ 5228 pipe_config->name, \ 5229 (len)); \ 5230 ret = false; \ 5231 } \ 5232 } while (0) 5233 5234 #define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \ 5235 if (current_config->gamma_mode == pipe_config->gamma_mode && \ 5236 !intel_color_lut_equal(current_config, \ 5237 current_config->lut, pipe_config->lut, \ 5238 is_pre_csc_lut)) { \ 5239 pipe_config_mismatch(&p, fastset, crtc, __stringify(lut), \ 5240 "hw_state doesn't match sw_state"); \ 5241 ret = false; \ 5242 } \ 5243 } while (0) 5244 5245 #define PIPE_CONF_CHECK_CSC(name) do { \ 5246 PIPE_CONF_CHECK_X(name.preoff[0]); \ 5247 PIPE_CONF_CHECK_X(name.preoff[1]); \ 5248 PIPE_CONF_CHECK_X(name.preoff[2]); \ 5249 PIPE_CONF_CHECK_X(name.coeff[0]); \ 5250 PIPE_CONF_CHECK_X(name.coeff[1]); \ 5251 PIPE_CONF_CHECK_X(name.coeff[2]); \ 5252 PIPE_CONF_CHECK_X(name.coeff[3]); \ 5253 PIPE_CONF_CHECK_X(name.coeff[4]); \ 5254 PIPE_CONF_CHECK_X(name.coeff[5]); \ 5255 PIPE_CONF_CHECK_X(name.coeff[6]); \ 5256 PIPE_CONF_CHECK_X(name.coeff[7]); \ 5257 PIPE_CONF_CHECK_X(name.coeff[8]); \ 5258 PIPE_CONF_CHECK_X(name.postoff[0]); \ 5259 PIPE_CONF_CHECK_X(name.postoff[1]); \ 5260 PIPE_CONF_CHECK_X(name.postoff[2]); \ 5261 } while (0) 5262 5263 #define PIPE_CONF_QUIRK(quirk) \ 5264 
((current_config->quirks | pipe_config->quirks) & (quirk)) 5265 5266 PIPE_CONF_CHECK_BOOL(hw.enable); 5267 PIPE_CONF_CHECK_BOOL(hw.active); 5268 5269 PIPE_CONF_CHECK_I(cpu_transcoder); 5270 PIPE_CONF_CHECK_I(mst_master_transcoder); 5271 5272 PIPE_CONF_CHECK_BOOL(has_pch_encoder); 5273 PIPE_CONF_CHECK_I(fdi_lanes); 5274 PIPE_CONF_CHECK_M_N(fdi_m_n); 5275 5276 PIPE_CONF_CHECK_I(lane_count); 5277 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 5278 5279 PIPE_CONF_CHECK_I(min_hblank); 5280 5281 if (HAS_DOUBLE_BUFFERED_M_N(display)) { 5282 if (!fastset || !pipe_config->update_m_n) 5283 PIPE_CONF_CHECK_M_N(dp_m_n); 5284 } else { 5285 PIPE_CONF_CHECK_M_N(dp_m_n); 5286 PIPE_CONF_CHECK_M_N(dp_m2_n2); 5287 } 5288 5289 PIPE_CONF_CHECK_X(output_types); 5290 5291 PIPE_CONF_CHECK_I(framestart_delay); 5292 PIPE_CONF_CHECK_I(msa_timing_delay); 5293 5294 PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode); 5295 PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode); 5296 5297 PIPE_CONF_CHECK_I(pixel_multiplier); 5298 5299 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5300 DRM_MODE_FLAG_INTERLACE); 5301 5302 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 5303 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5304 DRM_MODE_FLAG_PHSYNC); 5305 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5306 DRM_MODE_FLAG_NHSYNC); 5307 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5308 DRM_MODE_FLAG_PVSYNC); 5309 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5310 DRM_MODE_FLAG_NVSYNC); 5311 } 5312 5313 PIPE_CONF_CHECK_I(output_format); 5314 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 5315 if ((DISPLAY_VER(display) < 8 && !display->platform.haswell) || 5316 display->platform.valleyview || display->platform.cherryview) 5317 PIPE_CONF_CHECK_BOOL(limited_color_range); 5318 5319 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 5320 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 5321 PIPE_CONF_CHECK_BOOL(has_infoframe); 5322 PIPE_CONF_CHECK_BOOL(enhanced_framing); 5323 PIPE_CONF_CHECK_BOOL(fec_enable); 5324 5325 if (!fastset) { 5326 
PIPE_CONF_CHECK_BOOL(has_audio); 5327 PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES); 5328 } 5329 5330 PIPE_CONF_CHECK_X(gmch_pfit.control); 5331 /* pfit ratios are autocomputed by the hw on gen4+ */ 5332 if (DISPLAY_VER(display) < 4) 5333 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 5334 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 5335 5336 /* 5337 * Changing the EDP transcoder input mux 5338 * (A_ONOFF vs. A_ON) requires a full modeset. 5339 */ 5340 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 5341 5342 if (!fastset) { 5343 PIPE_CONF_CHECK_RECT(pipe_src); 5344 5345 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 5346 PIPE_CONF_CHECK_RECT(pch_pfit.dst); 5347 5348 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 5349 PIPE_CONF_CHECK_I(pixel_rate); 5350 PIPE_CONF_CHECK_BOOL(hw.casf_params.casf_enable); 5351 PIPE_CONF_CHECK_I(hw.casf_params.win_size); 5352 PIPE_CONF_CHECK_I(hw.casf_params.strength); 5353 5354 PIPE_CONF_CHECK_X(gamma_mode); 5355 if (display->platform.cherryview) 5356 PIPE_CONF_CHECK_X(cgm_mode); 5357 else 5358 PIPE_CONF_CHECK_X(csc_mode); 5359 PIPE_CONF_CHECK_BOOL(gamma_enable); 5360 PIPE_CONF_CHECK_BOOL(csc_enable); 5361 PIPE_CONF_CHECK_BOOL(wgc_enable); 5362 5363 PIPE_CONF_CHECK_I(linetime); 5364 PIPE_CONF_CHECK_I(ips_linetime); 5365 5366 PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true); 5367 PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false); 5368 5369 PIPE_CONF_CHECK_CSC(csc); 5370 PIPE_CONF_CHECK_CSC(output_csc); 5371 } 5372 5373 PIPE_CONF_CHECK_BOOL(double_wide); 5374 5375 if (display->dpll.mgr) 5376 PIPE_CONF_CHECK_P(intel_dpll); 5377 5378 /* FIXME convert everything over the dpll_mgr */ 5379 if (display->dpll.mgr || HAS_GMCH(display)) 5380 PIPE_CONF_CHECK_PLL(dpll_hw_state); 5381 5382 /* FIXME convert MTL+ platforms over to dpll_mgr */ 5383 if (HAS_LT_PHY(display)) 5384 PIPE_CONF_CHECK_PLL_LT(dpll_hw_state.ltpll); 5385 5386 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 5387 PIPE_CONF_CHECK_X(dsi_pll.div); 5388 5389 if (display->platform.g4x || DISPLAY_VER(display) >= 5) 5390 
PIPE_CONF_CHECK_I(pipe_bpp); 5391 5392 if (!fastset || !pipe_config->update_m_n) { 5393 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock); 5394 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock); 5395 } 5396 PIPE_CONF_CHECK_I(port_clock); 5397 5398 PIPE_CONF_CHECK_I(min_voltage_level); 5399 5400 if (current_config->has_psr || pipe_config->has_psr) 5401 exclude_infoframes |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 5402 5403 if (current_config->vrr.enable || pipe_config->vrr.enable) 5404 exclude_infoframes |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC); 5405 5406 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, ~exclude_infoframes); 5407 PIPE_CONF_CHECK_X(infoframes.gcp); 5408 PIPE_CONF_CHECK_INFOFRAME(avi); 5409 PIPE_CONF_CHECK_INFOFRAME(spd); 5410 PIPE_CONF_CHECK_INFOFRAME(hdmi); 5411 if (!fastset) { 5412 PIPE_CONF_CHECK_INFOFRAME(drm); 5413 PIPE_CONF_CHECK_DP_AS_SDP(as_sdp); 5414 } 5415 PIPE_CONF_CHECK_DP_VSC_SDP(vsc); 5416 5417 PIPE_CONF_CHECK_X(sync_mode_slaves_mask); 5418 PIPE_CONF_CHECK_I(master_transcoder); 5419 PIPE_CONF_CHECK_X(joiner_pipes); 5420 5421 PIPE_CONF_CHECK_BOOL(dsc.config.block_pred_enable); 5422 PIPE_CONF_CHECK_BOOL(dsc.config.convert_rgb); 5423 PIPE_CONF_CHECK_BOOL(dsc.config.simple_422); 5424 PIPE_CONF_CHECK_BOOL(dsc.config.native_422); 5425 PIPE_CONF_CHECK_BOOL(dsc.config.native_420); 5426 PIPE_CONF_CHECK_BOOL(dsc.config.vbr_enable); 5427 PIPE_CONF_CHECK_I(dsc.config.line_buf_depth); 5428 PIPE_CONF_CHECK_I(dsc.config.bits_per_component); 5429 PIPE_CONF_CHECK_I(dsc.config.pic_width); 5430 PIPE_CONF_CHECK_I(dsc.config.pic_height); 5431 PIPE_CONF_CHECK_I(dsc.config.slice_width); 5432 PIPE_CONF_CHECK_I(dsc.config.slice_height); 5433 PIPE_CONF_CHECK_I(dsc.config.initial_dec_delay); 5434 PIPE_CONF_CHECK_I(dsc.config.initial_xmit_delay); 5435 PIPE_CONF_CHECK_I(dsc.config.scale_decrement_interval); 5436 PIPE_CONF_CHECK_I(dsc.config.scale_increment_interval); 5437 PIPE_CONF_CHECK_I(dsc.config.initial_scale_value); 5438 
PIPE_CONF_CHECK_I(dsc.config.first_line_bpg_offset); 5439 PIPE_CONF_CHECK_I(dsc.config.flatness_min_qp); 5440 PIPE_CONF_CHECK_I(dsc.config.flatness_max_qp); 5441 PIPE_CONF_CHECK_I(dsc.config.slice_bpg_offset); 5442 PIPE_CONF_CHECK_I(dsc.config.nfl_bpg_offset); 5443 PIPE_CONF_CHECK_I(dsc.config.initial_offset); 5444 PIPE_CONF_CHECK_I(dsc.config.final_offset); 5445 PIPE_CONF_CHECK_I(dsc.config.rc_model_size); 5446 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit0); 5447 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit1); 5448 PIPE_CONF_CHECK_I(dsc.config.slice_chunk_size); 5449 PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset); 5450 PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset); 5451 5452 PIPE_CONF_CHECK_BOOL(dsc.compression_enable); 5453 PIPE_CONF_CHECK_I(dsc.num_streams); 5454 PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16); 5455 5456 PIPE_CONF_CHECK_BOOL(splitter.enable); 5457 PIPE_CONF_CHECK_I(splitter.link_count); 5458 PIPE_CONF_CHECK_I(splitter.pixel_overlap); 5459 5460 if (!fastset) { 5461 PIPE_CONF_CHECK_BOOL(vrr.enable); 5462 PIPE_CONF_CHECK_I(vrr.vmin); 5463 PIPE_CONF_CHECK_I(vrr.vmax); 5464 PIPE_CONF_CHECK_I(vrr.flipline); 5465 PIPE_CONF_CHECK_I(vrr.vsync_start); 5466 PIPE_CONF_CHECK_I(vrr.vsync_end); 5467 PIPE_CONF_CHECK_LLI(cmrr.cmrr_m); 5468 PIPE_CONF_CHECK_LLI(cmrr.cmrr_n); 5469 PIPE_CONF_CHECK_BOOL(cmrr.enable); 5470 } 5471 5472 if (!fastset || intel_vrr_always_use_vrr_tg(display)) { 5473 PIPE_CONF_CHECK_I(vrr.pipeline_full); 5474 PIPE_CONF_CHECK_I(vrr.guardband); 5475 } 5476 5477 PIPE_CONF_CHECK_I(set_context_latency); 5478 5479 #undef PIPE_CONF_CHECK_X 5480 #undef PIPE_CONF_CHECK_I 5481 #undef PIPE_CONF_CHECK_LLI 5482 #undef PIPE_CONF_CHECK_BOOL 5483 #undef PIPE_CONF_CHECK_P 5484 #undef PIPE_CONF_CHECK_FLAGS 5485 #undef PIPE_CONF_CHECK_COLOR_LUT 5486 #undef PIPE_CONF_CHECK_TIMINGS 5487 #undef PIPE_CONF_CHECK_RECT 5488 #undef PIPE_CONF_QUIRK 5489 5490 return ret; 5491 } 5492 5493 static void 5494 intel_verify_planes(struct intel_atomic_state *state) 5495 
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int i;

	/* every visible (or Y) plane must actually be enabled in hw */
	for_each_new_intel_plane_in_state(state, plane,
					  plane_state, i)
		assert_plane(plane, plane_state->is_y_plane ||
			     plane_state->uapi.visible);
}

/*
 * Flag @crtc_state for a full modeset due to @reason, and pull the
 * connector, DP tunnel, MST topology and plane state the modeset will
 * need into @state.
 */
static int intel_modeset_pipe(struct intel_atomic_state *state,
			      struct intel_crtc_state *crtc_state,
			      const char *reason)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int ret;

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
		    crtc->base.base.id, crtc->base.name, reason);

	ret = drm_atomic_add_affected_connectors(&state->base,
						 &crtc->base);
	if (ret)
		return ret;

	ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc);
	if (ret)
		return ret;

	ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
	if (ret)
		return ret;

	ret = intel_plane_add_affected(state, crtc);
	if (ret)
		return ret;

	crtc_state->uapi.mode_changed = true;

	return 0;
}

/**
 * intel_modeset_pipes_in_mask_early - force a full modeset on a set of pipes
 * @state: intel atomic state
 * @reason: the reason for the full modeset
 * @mask: mask of pipes to modeset
 *
 * Add pipes in @mask to @state and force a full modeset on the enabled ones
 * due to the description in @reason.
 * This function can be called only before new plane states are computed.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
				      const char *reason, u8 mask)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, mask) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* skip disabled pipes and ones already doing a full modeset */
		if (!crtc_state->hw.enable ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		ret = intel_modeset_pipe(state, crtc_state, reason);
		if (ret)
			return ret;
	}

	return 0;
}

/* Mark @crtc_state as a full modeset, cancelling any fastset optimizations. */
static void
intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.mode_changed = true;

	crtc_state->update_pipe = false;
	crtc_state->update_m_n = false;
	crtc_state->update_lrr = false;
}

/**
 * intel_modeset_all_pipes_late - force a full modeset on all pipes
 * @state: intel atomic state
 * @reason: the reason for the full modeset
 *
 * Add all pipes to @state and force a full modeset on the active ones due to
 * the description in @reason.
 * This function can be called only after new plane states are computed already.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
				 const char *reason)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;

	for_each_intel_crtc(display->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		ret = intel_modeset_pipe(state, crtc_state, reason);
		if (ret)
			return ret;

		intel_crtc_flag_modeset(crtc_state);

		/* plane states exist already, so force them to be recommitted too */
		crtc_state->update_planes |= crtc_state->active_planes;
		crtc_state->async_flip_planes = 0;
		crtc_state->do_async_flip = false;
	}

	return 0;
}

/*
 * Commit an internal connectors-changed-only update for all pipes in
 * @pipe_mask, using the caller's acquire context @ctx.
 */
int intel_modeset_commit_pipes(struct intel_display *display,
			       u8 pipe_mask,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct intel_crtc *crtc;
	int ret;

	state = drm_atomic_state_alloc(display->drm);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	to_intel_atomic_state(state)->internal = true;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		crtc_state->uapi.connectors_changed = true;
	}

	ret = drm_atomic_commit(state);
out:
	drm_atomic_state_put(state);

	return ret;
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation.
When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/* record which pipe must wait for vblanks on which other pipe */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

/*
 * Compute the resulting enabled-pipe mask: bits for pipes present in @state
 * are overridden by their new hw.enable, other bits of @enabled_pipes are
 * passed through unchanged.
 */
u8 intel_calc_enabled_pipes(struct intel_atomic_state *state,
			    u8 enabled_pipes)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->hw.enable)
			enabled_pipes |= BIT(crtc->pipe);
		else
			enabled_pipes &= ~BIT(crtc->pipe);
	}

	return enabled_pipes;
}

/* Like intel_calc_enabled_pipes(), but keyed off hw.active instead. */
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
			   u8 active_pipes)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);
		else
			active_pipes &= ~BIT(crtc->pipe);
	}

	return active_pipes;
}

/* Global checks run when at least one crtc in @state needs a full modeset. */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);

	state->modeset = true;

	if (display->platform.haswell)
		return hsw_mode_set_planes_workaround(state);

	return 0;
}

/* Did any of the timing parameters that LRR can update actually change? */
static bool lrr_params_changed(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state
*new_crtc_state)
{
	const struct drm_display_mode *old_adjusted_mode = &old_crtc_state->hw.adjusted_mode;
	const struct drm_display_mode *new_adjusted_mode = &new_crtc_state->hw.adjusted_mode;

	return old_adjusted_mode->crtc_vblank_start != new_adjusted_mode->crtc_vblank_start ||
		old_adjusted_mode->crtc_vblank_end != new_adjusted_mode->crtc_vblank_end ||
		old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal ||
		old_crtc_state->set_context_latency != new_crtc_state->set_context_latency;
}

/*
 * Downgrade a flagged modeset to a fastset when the old and new states
 * compare equal under the fastset rules, and drop the update_m_n/update_lrr
 * flags when they turn out to be no-ops. Note the statement order matters:
 * the pipe config comparison may itself depend on update_m_n/update_lrr.
 */
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);

	/* only allow LRR when the timings stay within the VRR range */
	if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range)
		new_crtc_state->update_lrr = false;

	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) {
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
			    crtc->base.base.id, crtc->base.name);
	} else {
		if (allow_vblank_delay_fastset(old_crtc_state))
			new_crtc_state->update_lrr = true;
		new_crtc_state->uapi.mode_changed = false;
	}

	if (intel_compare_link_m_n(&old_crtc_state->dp_m_n,
				   &new_crtc_state->dp_m_n))
		new_crtc_state->update_m_n = false;

	if (!lrr_params_changed(old_crtc_state, new_crtc_state))
		new_crtc_state->update_lrr = false;

	if (intel_crtc_needs_modeset(new_crtc_state))
		intel_crtc_flag_modeset(new_crtc_state);
	else
		new_crtc_state->update_pipe = true;
}

/* Run the per-crtc atomic checks for every crtc in @state. */
static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state __maybe_unused *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret;

		ret = intel_crtc_atomic_check(state, crtc);
		if (ret) {
			drm_dbg_atomic(display->drm,
				       "[CRTC:%d:%s] atomic driver check failed\n",
				       crtc->base.base.id, crtc->base.name);
			return ret;
		}
	}

	return 0;
}

/* Does any enabled crtc using one of @transcoders need a full modeset? */
static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
					       u8 transcoders)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}

/* Does any enabled crtc in the pipe mask @pipes need a full modeset? */
static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
				     u8 pipes)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    pipes & BIT(crtc->pipe) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}

/*
 * Validate the joiner configuration of @primary_crtc and pull in / set up
 * the crtc states of all its secondary pipes.
 */
static int intel_atomic_check_joiner(struct intel_atomic_state *state,
				     struct intel_crtc *primary_crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *primary_crtc_state =
		intel_atomic_get_new_crtc_state(state, primary_crtc);
	struct intel_crtc *secondary_crtc;

	if (!primary_crtc_state->joiner_pipes)
		return 0;

	/* sanity check */
	if (drm_WARN_ON(display->drm,
			primary_crtc->pipe != joiner_primary_pipe(primary_crtc_state)))
		return -EINVAL;

	if (primary_crtc_state->joiner_pipes & ~joiner_pipes(display)) {
drm_dbg_kms(display->drm, 5889 "[CRTC:%d:%s] Cannot act as joiner primary " 5890 "(need 0x%x as pipes, only 0x%x possible)\n", 5891 primary_crtc->base.base.id, primary_crtc->base.name, 5892 primary_crtc_state->joiner_pipes, joiner_pipes(display)); 5893 return -EINVAL; 5894 } 5895 5896 for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc, 5897 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) { 5898 struct intel_crtc_state *secondary_crtc_state; 5899 int ret; 5900 5901 secondary_crtc_state = intel_atomic_get_crtc_state(&state->base, secondary_crtc); 5902 if (IS_ERR(secondary_crtc_state)) 5903 return PTR_ERR(secondary_crtc_state); 5904 5905 /* primary being enabled, secondary was already configured? */ 5906 if (secondary_crtc_state->uapi.enable) { 5907 drm_dbg_kms(display->drm, 5908 "[CRTC:%d:%s] secondary is enabled as normal CRTC, but " 5909 "[CRTC:%d:%s] claiming this CRTC for joiner.\n", 5910 secondary_crtc->base.base.id, secondary_crtc->base.name, 5911 primary_crtc->base.base.id, primary_crtc->base.name); 5912 return -EINVAL; 5913 } 5914 5915 /* 5916 * The state copy logic assumes the primary crtc gets processed 5917 * before the secondary crtc during the main compute_config loop. 5918 * This works because the crtcs are created in pipe order, 5919 * and the hardware requires primary pipe < secondary pipe as well. 5920 * Should that change we need to rethink the logic. 
5921 */ 5922 if (WARN_ON(drm_crtc_index(&primary_crtc->base) > 5923 drm_crtc_index(&secondary_crtc->base))) 5924 return -EINVAL; 5925 5926 drm_dbg_kms(display->drm, 5927 "[CRTC:%d:%s] Used as secondary for joiner primary [CRTC:%d:%s]\n", 5928 secondary_crtc->base.base.id, secondary_crtc->base.name, 5929 primary_crtc->base.base.id, primary_crtc->base.name); 5930 5931 secondary_crtc_state->joiner_pipes = 5932 primary_crtc_state->joiner_pipes; 5933 5934 ret = copy_joiner_crtc_state_modeset(state, secondary_crtc); 5935 if (ret) 5936 return ret; 5937 } 5938 5939 return 0; 5940 } 5941 5942 static void kill_joiner_secondaries(struct intel_atomic_state *state, 5943 struct intel_crtc *primary_crtc) 5944 { 5945 struct intel_display *display = to_intel_display(state); 5946 struct intel_crtc_state *primary_crtc_state = 5947 intel_atomic_get_new_crtc_state(state, primary_crtc); 5948 struct intel_crtc *secondary_crtc; 5949 5950 for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc, 5951 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) { 5952 struct intel_crtc_state *secondary_crtc_state = 5953 intel_atomic_get_new_crtc_state(state, secondary_crtc); 5954 5955 secondary_crtc_state->joiner_pipes = 0; 5956 5957 intel_crtc_copy_uapi_to_hw_state_modeset(state, secondary_crtc); 5958 } 5959 5960 primary_crtc_state->joiner_pipes = 0; 5961 } 5962 5963 /** 5964 * DOC: asynchronous flip implementation 5965 * 5966 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC 5967 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL. 5968 * Correspondingly, support is currently added for primary plane only. 5969 * 5970 * Async flip can only change the plane surface address, so anything else 5971 * changing is rejected from the intel_async_flip_check_hw() function. 5972 * Once this check is cleared, flip done interrupt is enabled using 5973 * the intel_crtc_enable_flip_done() function. 
 *
 * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to the userspace in the interrupt
 * handler itself. The timestamp and sequence sent during the flip done event
 * correspond to the last vblank and have no relation to the actual time when
 * the flip done event was sent.
 */

/*
 * Validate the uapi-level async flip request on @crtc: the crtc must be
 * active and not require a full modeset, joiner and PSR2 selective fetch
 * are disallowed, and every plane on the pipe must support async flips
 * and have a framebuffer both before and after the flip.
 * Returns 0 on success or -EINVAL with a debug message otherwise.
 */
static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	/* Nothing to check unless userspace requested an async flip. */
	if (!new_crtc_state->uapi.async_flip)
		return 0;

	if (!new_crtc_state->uapi.active) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] not active\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] modeset required\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	/*
	 * FIXME: joiner+async flip is busted currently.
	 * Remove this check once the issues are fixed.
	 */
	if (new_crtc_state->joiner_pipes) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] async flip disallowed with joiner\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	/* FIXME: selective fetch should be disabled for async flips */
	if (new_crtc_state->enable_psr2_sel_fetch) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] async flip disallowed with PSR2 selective fetch\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * TODO: Async flip is only supported through the page flip IOCTL
		 * as of now. So support currently added for primary plane only.
		 * Support for other planes on platforms on which supports
		 * this(vlv/chv and icl+) should be added when async flip is
		 * enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip) {
			drm_dbg_kms(display->drm,
				    "[PLANE:%d:%s] async flip not supported\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
			drm_dbg_kms(display->drm,
				    "[PLANE:%d:%s] no old or new framebuffer\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Validate the hw-level state for an async flip on @crtc after the full
 * state has been computed: only a plane surface address update is
 * allowed; anything else (stride, modifier, format, rotation, size,
 * alpha, blend mode, color settings, decryption) is rejected.
 */
static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	int i;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->uapi.async_flip)
		return 0;
6071 6072 if (!new_crtc_state->hw.active) { 6073 drm_dbg_kms(display->drm, 6074 "[CRTC:%d:%s] not active\n", 6075 crtc->base.base.id, crtc->base.name); 6076 return -EINVAL; 6077 } 6078 6079 if (intel_crtc_needs_modeset(new_crtc_state)) { 6080 drm_dbg_kms(display->drm, 6081 "[CRTC:%d:%s] modeset required\n", 6082 crtc->base.base.id, crtc->base.name); 6083 return -EINVAL; 6084 } 6085 6086 if (old_crtc_state->active_planes != new_crtc_state->active_planes) { 6087 drm_dbg_kms(display->drm, 6088 "[CRTC:%d:%s] Active planes cannot be in async flip\n", 6089 crtc->base.base.id, crtc->base.name); 6090 return -EINVAL; 6091 } 6092 6093 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 6094 new_plane_state, i) { 6095 if (plane->pipe != crtc->pipe) 6096 continue; 6097 6098 /* 6099 * Only async flip capable planes should be in the state 6100 * if we're really about to ask the hardware to perform 6101 * an async flip. We should never get this far otherwise. 6102 */ 6103 if (drm_WARN_ON(display->drm, 6104 new_crtc_state->do_async_flip && !plane->async_flip)) 6105 return -EINVAL; 6106 6107 /* 6108 * Only check async flip capable planes other planes 6109 * may be involved in the initial commit due to 6110 * the wm0/ddb optimization. 6111 * 6112 * TODO maybe should track which planes actually 6113 * were requested to do the async flip... 6114 */ 6115 if (!plane->async_flip) 6116 continue; 6117 6118 if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->format, 6119 new_plane_state->hw.fb->modifier)) { 6120 drm_dbg_kms(display->drm, 6121 "[PLANE:%d:%s] pixel format %p4cc / modifier 0x%llx does not support async flip\n", 6122 plane->base.base.id, plane->base.name, 6123 &new_plane_state->hw.fb->format->format, 6124 new_plane_state->hw.fb->modifier); 6125 return -EINVAL; 6126 } 6127 6128 /* 6129 * We turn the first async flip request into a sync flip 6130 * so that we can reconfigure the plane (eg. change modifier). 
6131 */ 6132 if (!new_crtc_state->do_async_flip) 6133 continue; 6134 6135 if (old_plane_state->view.color_plane[0].mapping_stride != 6136 new_plane_state->view.color_plane[0].mapping_stride) { 6137 drm_dbg_kms(display->drm, 6138 "[PLANE:%d:%s] Stride cannot be changed in async flip\n", 6139 plane->base.base.id, plane->base.name); 6140 return -EINVAL; 6141 } 6142 6143 if (old_plane_state->hw.fb->modifier != 6144 new_plane_state->hw.fb->modifier) { 6145 drm_dbg_kms(display->drm, 6146 "[PLANE:%d:%s] Modifier cannot be changed in async flip\n", 6147 plane->base.base.id, plane->base.name); 6148 return -EINVAL; 6149 } 6150 6151 if (old_plane_state->hw.fb->format != 6152 new_plane_state->hw.fb->format) { 6153 drm_dbg_kms(display->drm, 6154 "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n", 6155 plane->base.base.id, plane->base.name); 6156 return -EINVAL; 6157 } 6158 6159 if (old_plane_state->hw.rotation != 6160 new_plane_state->hw.rotation) { 6161 drm_dbg_kms(display->drm, 6162 "[PLANE:%d:%s] Rotation cannot be changed in async flip\n", 6163 plane->base.base.id, plane->base.name); 6164 return -EINVAL; 6165 } 6166 6167 if (skl_plane_aux_dist(old_plane_state, 0) != 6168 skl_plane_aux_dist(new_plane_state, 0)) { 6169 drm_dbg_kms(display->drm, 6170 "[PLANE:%d:%s] AUX_DIST cannot be changed in async flip\n", 6171 plane->base.base.id, plane->base.name); 6172 return -EINVAL; 6173 } 6174 6175 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) || 6176 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) { 6177 drm_dbg_kms(display->drm, 6178 "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n", 6179 plane->base.base.id, plane->base.name); 6180 return -EINVAL; 6181 } 6182 6183 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) { 6184 drm_dbg_kms(display->drm, 6185 "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n", 6186 plane->base.base.id, plane->base.name); 6187 return -EINVAL; 6188 } 
6189 6190 if (old_plane_state->hw.pixel_blend_mode != 6191 new_plane_state->hw.pixel_blend_mode) { 6192 drm_dbg_kms(display->drm, 6193 "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n", 6194 plane->base.base.id, plane->base.name); 6195 return -EINVAL; 6196 } 6197 6198 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) { 6199 drm_dbg_kms(display->drm, 6200 "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n", 6201 plane->base.base.id, plane->base.name); 6202 return -EINVAL; 6203 } 6204 6205 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) { 6206 drm_dbg_kms(display->drm, 6207 "[PLANE:%d:%s] Color range cannot be changed in async flip\n", 6208 plane->base.base.id, plane->base.name); 6209 return -EINVAL; 6210 } 6211 6212 /* plane decryption is allow to change only in synchronous flips */ 6213 if (old_plane_state->decrypt != new_plane_state->decrypt) { 6214 drm_dbg_kms(display->drm, 6215 "[PLANE:%d:%s] Decryption cannot be changed in async flip\n", 6216 plane->base.base.id, plane->base.name); 6217 return -EINVAL; 6218 } 6219 } 6220 6221 return 0; 6222 } 6223 6224 static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state) 6225 { 6226 struct intel_display *display = to_intel_display(state); 6227 const struct intel_plane_state *plane_state; 6228 struct intel_crtc_state *crtc_state; 6229 struct intel_plane *plane; 6230 struct intel_crtc *crtc; 6231 u8 affected_pipes = 0; 6232 u8 modeset_pipes = 0; 6233 int i; 6234 6235 /* 6236 * Any plane which is in use by the joiner needs its crtc. 6237 * Pull those in first as this will not have happened yet 6238 * if the plane remains disabled according to uapi. 
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		crtc = to_intel_crtc(plane_state->hw.crtc);
		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	/* Now pull in all joined crtcs */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		affected_pipes |= crtc_state->joiner_pipes;
		if (intel_crtc_needs_modeset(crtc_state))
			modeset_pipes |= crtc_state->joiner_pipes;
	}

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, affected_pipes) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	/* Joined pipes that modeset also modeset their connectors/planes. */
	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, modeset_pipes) {
		int ret;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = intel_plane_add_affected(state, crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old joiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    intel_crtc_is_joiner_primary(crtc_state))
			kill_joiner_secondaries(state, crtc);
	}

	return 0;
}

/*
 * Compute the new config for every modeset crtc in @state within the
 * link bandwidth limits in @limits. On failure *@failed_pipe identifies
 * the pipe whose config computation failed (INVALID_PIPE otherwise).
 */
static int intel_atomic_check_config(struct intel_atomic_state *state,
				     struct intel_link_bw_limits *limits,
				     enum pipe *failed_pipe)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int ret;
	int i;

	*failed_pipe = INVALID_PIPE;

	ret = intel_joiner_add_affected_crtcs(state);
	if (ret)
		return ret;

	ret = intel_fdi_add_affected_crtcs(state);
	if (ret)
		return ret;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			/* Keep non-modeset crtcs' hw state in sync with uapi. */
			if (intel_crtc_is_joiner_secondary(new_crtc_state))
				copy_joiner_crtc_state_nomodeset(state, crtc);
			else
				intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
			continue;
		}

		/* Secondaries get their state copied from the primary, not computed. */
		if (drm_WARN_ON(display->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
			continue;

		ret = intel_crtc_prepare_cleared_state(state, crtc);
		if (ret)
			goto fail;

		if (!new_crtc_state->hw.enable)
			continue;

		ret = intel_modeset_pipe_config(state, crtc, limits);
		if (ret)
			goto fail;
	}

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (drm_WARN_ON(display->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
			continue;

		if (!new_crtc_state->hw.enable)
			continue;

		ret = intel_modeset_pipe_config_late(state, crtc);
		if (ret)
			goto fail;
	}

fail:
	if (ret)
		*failed_pipe = crtc->pipe;

	return ret;
}

/*
 * Iterate intel_atomic_check_config() and the link bandwidth check,
 * reducing per-pipe bpp limits until a config fits or a hard error occurs.
 */
static int intel_atomic_check_config_and_link(struct intel_atomic_state *state)
{
	struct intel_link_bw_limits new_limits;
	struct intel_link_bw_limits old_limits;
	int ret;

	intel_link_bw_init_limits(state, &new_limits);
	old_limits = new_limits;

	while (true) {
		enum pipe failed_pipe;

		ret = intel_atomic_check_config(state, &new_limits,
						&failed_pipe);
		if (ret) {
			/*
			 * The bpp limit for a pipe is below the minimum it supports, set the
			 * limit to the minimum and recalculate the config.
			 */
			if (ret == -EINVAL &&
			    intel_link_bw_set_bpp_limit_for_pipe(state,
								 &old_limits,
								 &new_limits,
								 failed_pipe))
				continue;

			break;
		}

		old_limits = new_limits;

		/* -EAGAIN means the bw check tightened the limits: recompute. */
		ret = intel_link_bw_atomic_check(state, &new_limits);
		if (ret != -EAGAIN)
			break;
	}

	return ret;
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @_state: state to validate
 */
int intel_atomic_check(struct drm_device *dev,
		       struct drm_atomic_state *_state)
{
	struct intel_display *display = to_intel_display(dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	if (!intel_display_driver_check_access(display))
		return -ENODEV;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * crtc's state no longer considered to be inherited
		 * after the first userspace/client initiated commit.
		 */
		if (!state->internal)
			new_crtc_state->inherited = false;

		if (new_crtc_state->inherited != old_crtc_state->inherited)
			new_crtc_state->uapi.mode_changed = true;

		if (new_crtc_state->uapi.scaling_filter !=
		    old_crtc_state->uapi.scaling_filter)
			new_crtc_state->uapi.mode_changed = true;
	}

	intel_vrr_check_modeset(state);

	ret = drm_atomic_helper_check_modeset(dev, &state->base);
	if (ret)
		goto fail;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = intel_async_flip_check_uapi(state, crtc);
		if (ret)
			return ret;
	}

	ret = intel_atomic_check_config_and_link(state);
	if (ret)
		goto fail;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (intel_crtc_is_joiner_secondary(new_crtc_state)) {
			drm_WARN_ON(display->drm, new_crtc_state->uapi.enable);
			continue;
		}

		ret = intel_atomic_check_joiner(state, crtc);
		if (ret)
			goto fail;
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		intel_joiner_adjust_pipe_src(new_crtc_state);

		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
	}

	/*
	 * Check if fastset is allowed by external dependencies like other
	 * pipes and transcoders.
	 *
	 * Right now it only forces a fullmodeset when the MST master
	 * transcoder did not change but the pipe of the master transcoder
	 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
	 * in case of port synced crtcs, if one of the synced crtcs
	 * needs a full modeset, all other synced crtcs should be
	 * forced a full modeset.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (intel_dp_mst_crtc_needs_modeset(state, crtc))
			intel_crtc_flag_modeset(new_crtc_state);

		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
			enum transcoder master = new_crtc_state->mst_master_transcoder;

			if (intel_cpu_transcoders_need_modeset(state, BIT(master)))
				intel_crtc_flag_modeset(new_crtc_state);
		}

		if (is_trans_port_sync_mode(new_crtc_state)) {
			u8 trans = new_crtc_state->sync_mode_slaves_mask;

			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
				trans |= BIT(new_crtc_state->master_transcoder);

			if (intel_cpu_transcoders_need_modeset(state, trans))
				intel_crtc_flag_modeset(new_crtc_state);
		}

		if (new_crtc_state->joiner_pipes) {
			if (intel_pipes_need_modeset(state, new_crtc_state->joiner_pipes))
				intel_crtc_flag_modeset(new_crtc_state);
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		intel_dpll_release(state, crtc);
	}

	if (intel_any_crtc_needs_modeset(state) && !check_digital_port_conflicts(state)) {
		drm_dbg_kms(display->drm, "rejecting conflicting digital port configuration\n");
		ret = -EINVAL;
		goto fail;
	}

	ret = intel_plane_atomic_check(state);
	if (ret)
		goto fail;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		new_crtc_state->min_cdclk = intel_crtc_min_cdclk(new_crtc_state);

	ret = intel_compute_global_watermarks(state);
	if (ret)
		goto fail;

	ret = intel_bw_atomic_check(state);
	if (ret)
		goto fail;

	ret = intel_cdclk_atomic_check(state);
	if (ret)
		goto fail;

	if (intel_any_crtc_needs_modeset(state)) {
		ret = intel_modeset_checks(state);
		if (ret)
			goto fail;
	}

	ret = intel_pmdemand_atomic_check(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check_crtcs(state);
	if (ret)
		goto fail;

	ret = intel_fbc_atomic_check(state);
	if (ret)
		goto fail;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		intel_color_assert_luts(new_crtc_state);

		ret = intel_async_flip_check_hw(state, crtc);
		if (ret)
			goto fail;

		/* Either full modeset or fastset (or neither), never both */
		drm_WARN_ON(display->drm,
			    intel_crtc_needs_modeset(new_crtc_state) &&
			    intel_crtc_needs_fastset(new_crtc_state));

		if (!intel_crtc_needs_modeset(new_crtc_state) &&
		    !intel_crtc_needs_fastset(new_crtc_state))
			continue;

		intel_crtc_state_dump(new_crtc_state, state,
				      intel_crtc_needs_modeset(new_crtc_state) ?
				      "modeset" : "fastset");
	}

	return 0;

fail:
	if (ret == -EDEADLK)
		return ret;

	/*
	 * FIXME would probably be nice to know which crtc specifically
	 * caused the failure, in cases where we can pinpoint it.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_crtc_state_dump(new_crtc_state, state, "failed");

	return ret;
}

/* Pin/prepare all planes' framebuffers ahead of the commit. */
static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Enable CPU (and, for PCH encoders, PCH) FIFO underrun reporting for
 * @crtc once it is safe to do so.
 */
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);

	if (DISPLAY_VER(display) != 2 || crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, true);

	if (crtc_state->has_pch_encoder) {
		enum pipe pch_transcoder =
			intel_crtc_pch_transcoder(crtc);

		intel_set_pch_fifo_underrun_reporting(display, pch_transcoder, true);
	}
}

/*
 * Apply the pipe-level updates needed for a fastset: pipe src size,
 * panel fitter, linetime watermark, M/N and LRR timing updates.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
6638 */ 6639 intel_set_pipe_src_size(new_crtc_state); 6640 6641 /* on skylake this is done by detaching scalers */ 6642 if (DISPLAY_VER(display) >= 9) { 6643 if (new_crtc_state->pch_pfit.enabled) 6644 skl_pfit_enable(new_crtc_state); 6645 } else if (HAS_PCH_SPLIT(display)) { 6646 if (new_crtc_state->pch_pfit.enabled) 6647 ilk_pfit_enable(new_crtc_state); 6648 else if (old_crtc_state->pch_pfit.enabled) 6649 ilk_pfit_disable(old_crtc_state); 6650 } 6651 6652 /* 6653 * The register is supposedly single buffered so perhaps 6654 * not 100% correct to do this here. But SKL+ calculate 6655 * this based on the adjust pixel rate so pfit changes do 6656 * affect it and so it must be updated for fastsets. 6657 * HSW/BDW only really need this here for fastboot, after 6658 * that the value should not change without a full modeset. 6659 */ 6660 if (DISPLAY_VER(display) >= 9 || 6661 display->platform.broadwell || display->platform.haswell) 6662 hsw_set_linetime_wm(new_crtc_state); 6663 6664 if (new_crtc_state->update_m_n) 6665 intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder, 6666 &new_crtc_state->dp_m_n); 6667 6668 if (new_crtc_state->update_lrr) 6669 intel_set_transcoder_timings_lrr(new_crtc_state); 6670 } 6671 6672 static void commit_pipe_pre_planes(struct intel_atomic_state *state, 6673 struct intel_crtc *crtc) 6674 { 6675 struct intel_display *display = to_intel_display(state); 6676 const struct intel_crtc_state *old_crtc_state = 6677 intel_atomic_get_old_crtc_state(state, crtc); 6678 const struct intel_crtc_state *new_crtc_state = 6679 intel_atomic_get_new_crtc_state(state, crtc); 6680 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 6681 6682 drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq); 6683 6684 /* 6685 * During modesets pipe configuration was programmed as the 6686 * CRTC was enabled. 
6687 */ 6688 if (!modeset) { 6689 if (intel_crtc_needs_color_update(new_crtc_state)) 6690 intel_color_commit_arm(NULL, new_crtc_state); 6691 6692 if (DISPLAY_VER(display) >= 9 || display->platform.broadwell) 6693 bdw_set_pipe_misc(NULL, new_crtc_state); 6694 6695 if (intel_crtc_needs_fastset(new_crtc_state)) 6696 intel_pipe_fastset(old_crtc_state, new_crtc_state); 6697 } 6698 6699 intel_psr2_program_trans_man_trk_ctl(NULL, new_crtc_state); 6700 6701 intel_atomic_update_watermarks(state, crtc); 6702 } 6703 6704 static void commit_pipe_post_planes(struct intel_atomic_state *state, 6705 struct intel_crtc *crtc) 6706 { 6707 struct intel_display *display = to_intel_display(state); 6708 const struct intel_crtc_state *new_crtc_state = 6709 intel_atomic_get_new_crtc_state(state, crtc); 6710 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 6711 6712 drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq); 6713 6714 /* 6715 * Disable the scaler(s) after the plane(s) so that we don't 6716 * get a catastrophic underrun even if the two operations 6717 * end up happening in two different frames. 
	 */
	if (DISPLAY_VER(display) >= 9 && !modeset)
		skl_detach_scalers(NULL, new_crtc_state);

	if (!modeset &&
	    intel_crtc_needs_color_update(new_crtc_state) &&
	    !intel_color_uses_dsb(new_crtc_state) &&
	    HAS_DOUBLE_BUFFERED_LUT(display))
		intel_color_load_luts(new_crtc_state);

	if (intel_crtc_vrr_enabling(state, crtc))
		intel_vrr_enable(new_crtc_state);
}

/*
 * Enable @crtc (and its joined pipes) as part of a modeset: program
 * active timings, notify PSR, call the platform crtc_enable() hook and
 * re-enable pipe CRC collection.
 */
static void intel_enable_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_crtc *pipe_crtc;

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return;

	for_each_intel_crtc_in_pipe_mask_reverse(display->drm, pipe_crtc,
						 intel_crtc_joined_pipe_mask(new_crtc_state)) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		/* VRR will be enable later, if required */
		intel_crtc_update_active_timings(pipe_crtc_state, false);
	}

	intel_psr_notify_pipe_change(state, crtc, true);

	display->funcs.display->crtc_enable(state, crtc);

	/* vblanks work again, re-enable pipe CRC. */
	intel_crtc_enable_pipe_crc(crtc);
}

/*
 * Perform the "noarm" half of a crtc update: everything that can be
 * programmed ahead of the vblank-evade critical section (DPT config,
 * LUT preload, pre-plane updates, encoder/chicken fastset updates,
 * VRR timings, CASF, FBC, and noarm color/plane programming).
 */
static void intel_pre_update_crtc(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	if (old_crtc_state->inherited ||
	    intel_crtc_needs_modeset(new_crtc_state)) {
		if (HAS_DPT(display))
			intel_dpt_configure(crtc);
	}

	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    intel_crtc_needs_color_update(new_crtc_state))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (intel_crtc_needs_fastset(new_crtc_state))
			intel_encoders_update_pipe(state, crtc);

		if (DISPLAY_VER(display) >= 11 &&
		    intel_crtc_needs_fastset(new_crtc_state))
			icl_set_pipe_chicken(new_crtc_state);

		if (vrr_params_changed(old_crtc_state, new_crtc_state) ||
		    cmrr_params_changed(old_crtc_state, new_crtc_state))
			intel_vrr_set_transcoder_timings(new_crtc_state);
	}

	if (intel_casf_enabling(new_crtc_state, old_crtc_state))
		intel_casf_enable(new_crtc_state);
	else if (new_crtc_state->hw.casf_params.strength != old_crtc_state->hw.casf_params.strength)
		intel_casf_update_strength(new_crtc_state);

	intel_fbc_update(state, crtc);

	drm_WARN_ON(display->drm, !intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF));

	/* DSB/flipq commits do the noarm programming via their own batch. */
	if (!modeset &&
	    intel_crtc_needs_color_update(new_crtc_state) &&
	    !new_crtc_state->use_dsb && !new_crtc_state->use_flipq)
		intel_color_commit_noarm(NULL, new_crtc_state);

	if (!new_crtc_state->use_dsb && !new_crtc_state->use_flipq)
		intel_crtc_planes_update_noarm(NULL, state, crtc);
}

/*
 * Perform the "arm" half of a crtc update via flip queue, DSB, or plain
 * MMIO with vblank evasion, then refresh active timings if VRR/M-N/LRR
 * changed.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state->use_flipq) {
		intel_flipq_enable(new_crtc_state);

		intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->flipq_event);

		intel_flipq_add(crtc, INTEL_FLIPQ_PLANE_1, 0, INTEL_DSB_0,
				new_crtc_state->dsb_commit);
	} else if (new_crtc_state->use_dsb) {
		intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->dsb_event);

		intel_dsb_commit(new_crtc_state->dsb_commit);
	} else {
		/* Perform vblank evasion around commit operation */
		intel_pipe_update_start(state, crtc);

		if (new_crtc_state->dsb_commit)
			intel_dsb_commit(new_crtc_state->dsb_commit);

		commit_pipe_pre_planes(state, crtc);

		intel_crtc_planes_update_arm(NULL, state, crtc);

		commit_pipe_post_planes(state, crtc);

		intel_pipe_update_end(state, crtc);
	}

	/*
	 * VRR/Seamless M/N update may need to update frame timings.
	 *
	 * FIXME Should be synchronized with the start of vblank somehow...
	 */
	if (intel_crtc_vrr_enabling(state, crtc) ||
	    new_crtc_state->update_m_n || new_crtc_state->update_lrr)
		intel_crtc_update_active_timings(new_crtc_state,
						 new_crtc_state->vrr.enable);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets. But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (intel_crtc_needs_fastset(new_crtc_state) &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}

/*
 * Disable @crtc and its joined pipes: stop pipe CRC first (to avoid
 * racing vblank off), notify PSR, call the platform crtc_disable()
 * hook, then mark each pipe inactive, disable FBC and update
 * watermarks for pipes staying off.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc *pipe_crtc;

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
					 intel_crtc_joined_pipe_mask(old_crtc_state))
		intel_crtc_disable_pipe_crc(pipe_crtc);

	intel_psr_notify_pipe_change(state, crtc, false);

	display->funcs.display->crtc_disable(state, crtc);

	for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
					 intel_crtc_joined_pipe_mask(old_crtc_state)) {
		const struct intel_crtc_state *new_pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		pipe_crtc->active = false;
		intel_fbc_disable(pipe_crtc);

		if (!new_pipe_crtc_state->hw.active)
			intel_initial_watermarks(state, pipe_crtc);
	}
}

/*
 * Disable all crtcs doing a full modeset, in dependency order: planes
 * first, then port sync / MST slaves, then everything else left on.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u8 disable_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		/*
		 * Needs to be done even for pipes
		 * that weren't enabled previously.
		 */
		intel_pre_plane_update(state, crtc);

		if (!old_crtc_state->hw.active)
			continue;

		disable_pipes |= BIT(crtc->pipe);
	}

	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
		if ((disable_pipes & BIT(crtc->pipe)) == 0)
			continue;

		intel_crtc_disable_planes(state, crtc);

		drm_vblank_work_flush_all(&crtc->base);
	}

	/* Only disable port sync and MST slaves */
	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
		if ((disable_pipes & BIT(crtc->pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(old_crtc_state))
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_old_crtc_state_disables(state, crtc);

		disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
	}

	/* Disable everything else left on */
	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
		if ((disable_pipes & BIT(crtc->pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(old_crtc_state))
			continue;

		intel_old_crtc_state_disables(state, crtc);

		disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
	}

	drm_WARN_ON(display->drm, disable_pipes);
}

/*
 * Enable and update all active crtcs (non-skl path): an enable + noarm
 * pass over all crtcs, followed by the arm/update pass.
 */
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		intel_enable_crtc(state, crtc);
intel_pre_update_crtc(state, crtc); 6986 } 6987 6988 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6989 if (!new_crtc_state->hw.active) 6990 continue; 6991 6992 intel_update_crtc(state, crtc); 6993 } 6994 } 6995 6996 static void skl_commit_modeset_enables(struct intel_atomic_state *state) 6997 { 6998 struct intel_display *display = to_intel_display(state); 6999 struct intel_crtc *crtc; 7000 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 7001 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 7002 u8 update_pipes = 0, modeset_pipes = 0; 7003 int i; 7004 7005 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 7006 enum pipe pipe = crtc->pipe; 7007 7008 if (!new_crtc_state->hw.active) 7009 continue; 7010 7011 /* ignore allocations for crtc's that have been turned off. */ 7012 if (!intel_crtc_needs_modeset(new_crtc_state)) { 7013 entries[pipe] = old_crtc_state->wm.skl.ddb; 7014 update_pipes |= BIT(pipe); 7015 } else { 7016 modeset_pipes |= BIT(pipe); 7017 } 7018 } 7019 7020 /* 7021 * Whenever the number of active pipes changes, we need to make sure we 7022 * update the pipes in the right order so that their ddb allocations 7023 * never overlap with each other between CRTC updates. Otherwise we'll 7024 * cause pipe underruns and other bad stuff. 7025 * 7026 * So first lets enable all pipes that do not need a fullmodeset as 7027 * those don't have any external dependency. 7028 */ 7029 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7030 enum pipe pipe = crtc->pipe; 7031 7032 if ((update_pipes & BIT(pipe)) == 0) 7033 continue; 7034 7035 intel_pre_update_crtc(state, crtc); 7036 } 7037 7038 intel_dbuf_mbus_pre_ddb_update(state); 7039 7040 while (update_pipes) { 7041 /* 7042 * Commit in reverse order to make joiner primary 7043 * send the uapi events after secondaries are done. 
7044 */ 7045 for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state, 7046 new_crtc_state, i) { 7047 enum pipe pipe = crtc->pipe; 7048 7049 if ((update_pipes & BIT(pipe)) == 0) 7050 continue; 7051 7052 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 7053 entries, I915_MAX_PIPES, pipe)) 7054 continue; 7055 7056 entries[pipe] = new_crtc_state->wm.skl.ddb; 7057 update_pipes &= ~BIT(pipe); 7058 7059 intel_update_crtc(state, crtc); 7060 7061 /* 7062 * If this is an already active pipe, it's DDB changed, 7063 * and this isn't the last pipe that needs updating 7064 * then we need to wait for a vblank to pass for the 7065 * new ddb allocation to take effect. 7066 */ 7067 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, 7068 &old_crtc_state->wm.skl.ddb) && 7069 (update_pipes | modeset_pipes)) 7070 intel_crtc_wait_for_next_vblank(crtc); 7071 } 7072 } 7073 7074 intel_dbuf_mbus_post_ddb_update(state); 7075 7076 update_pipes = modeset_pipes; 7077 7078 /* 7079 * Enable all pipes that needs a modeset and do not depends on other 7080 * pipes 7081 */ 7082 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7083 enum pipe pipe = crtc->pipe; 7084 7085 if ((modeset_pipes & BIT(pipe)) == 0) 7086 continue; 7087 7088 if (intel_crtc_is_joiner_secondary(new_crtc_state)) 7089 continue; 7090 7091 if (intel_dp_mst_is_slave_trans(new_crtc_state) || 7092 is_trans_port_sync_master(new_crtc_state)) 7093 continue; 7094 7095 modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state); 7096 7097 intel_enable_crtc(state, crtc); 7098 } 7099 7100 /* 7101 * Then we enable all remaining pipes that depend on other 7102 * pipes: MST slaves and port sync masters 7103 */ 7104 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7105 enum pipe pipe = crtc->pipe; 7106 7107 if ((modeset_pipes & BIT(pipe)) == 0) 7108 continue; 7109 7110 if (intel_crtc_is_joiner_secondary(new_crtc_state)) 7111 continue; 7112 7113 modeset_pipes &= 
~intel_crtc_joined_pipe_mask(new_crtc_state); 7114 7115 intel_enable_crtc(state, crtc); 7116 } 7117 7118 /* 7119 * Finally we do the plane updates/etc. for all pipes that got enabled. 7120 */ 7121 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7122 enum pipe pipe = crtc->pipe; 7123 7124 if ((update_pipes & BIT(pipe)) == 0) 7125 continue; 7126 7127 intel_pre_update_crtc(state, crtc); 7128 } 7129 7130 /* 7131 * Commit in reverse order to make joiner primary 7132 * send the uapi events after secondaries are done. 7133 */ 7134 for_each_new_intel_crtc_in_state_reverse(state, crtc, new_crtc_state, i) { 7135 enum pipe pipe = crtc->pipe; 7136 7137 if ((update_pipes & BIT(pipe)) == 0) 7138 continue; 7139 7140 drm_WARN_ON(display->drm, 7141 skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 7142 entries, I915_MAX_PIPES, pipe)); 7143 7144 entries[pipe] = new_crtc_state->wm.skl.ddb; 7145 update_pipes &= ~BIT(pipe); 7146 7147 intel_update_crtc(state, crtc); 7148 } 7149 7150 drm_WARN_ON(display->drm, modeset_pipes); 7151 drm_WARN_ON(display->drm, update_pipes); 7152 } 7153 7154 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 7155 { 7156 struct drm_i915_private *i915 = to_i915(intel_state->base.dev); 7157 struct drm_plane *plane; 7158 struct drm_plane_state *new_plane_state; 7159 long ret; 7160 int i; 7161 7162 for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) { 7163 if (new_plane_state->fence) { 7164 ret = dma_fence_wait_timeout(new_plane_state->fence, false, 7165 i915_fence_timeout(i915)); 7166 if (ret <= 0) 7167 break; 7168 7169 dma_fence_put(new_plane_state->fence); 7170 new_plane_state->fence = NULL; 7171 } 7172 } 7173 } 7174 7175 static void intel_atomic_dsb_wait_commit(struct intel_crtc_state *crtc_state) 7176 { 7177 if (crtc_state->dsb_commit) 7178 intel_dsb_wait(crtc_state->dsb_commit); 7179 7180 intel_color_wait_commit(crtc_state); 7181 } 7182 7183 static void 
intel_atomic_dsb_cleanup(struct intel_crtc_state *crtc_state)
{
	/* Release the commit DSB (if allocated) and the color commit state. */
	if (crtc_state->dsb_commit) {
		intel_dsb_cleanup(crtc_state->dsb_commit);
		crtc_state->dsb_commit = NULL;
	}

	intel_color_cleanup_commit(crtc_state);
}

/*
 * Deferred cleanup after a commit: free per-crtc DSB state, then release
 * planes and the atomic state itself. Runs from display->wq.cleanup.
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, cleanup_work);
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
		intel_atomic_dsb_cleanup(old_crtc_state);

	drm_atomic_helper_cleanup_planes(display->drm, &state->base);
	drm_atomic_helper_commit_cleanup_done(&state->base);
	drm_atomic_state_put(&state->base);
}

/*
 * Read the cached fast clear color value out of each plane's fb clear
 * color (cc) plane into plane_state->ccval, for planes using RC CCS
 * with clear color.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int cc_plane;
		int ret;

		if (!fb)
			continue;

		cc_plane = intel_fb_rc_ccs_cc_plane(fb);
		if (cc_plane < 0)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at
		 * offset 0 of cc plane, plane #2 previous generations or
		 * plane #1 for flat ccs):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		ret = intel_bo_read_from_page(intel_fb_bo(fb),
					      fb->offsets[cc_plane] + 16,
					      &plane_state->ccval,
					      sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(display->drm, ret);
	}
}

/*
 * Decide whether this crtc's update will go through the flip queue or a
 * DSB, and set up the color commit. Legacy cursor updates and inactive
 * crtcs always take the mmio path.
 */
static void intel_atomic_dsb_prepare(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->hw.active)
		return;

	if (state->base.legacy_cursor_update)
		return;

	/* FIXME deal with everything */
	new_crtc_state->use_flipq =
		intel_flipq_supported(display) &&
		!new_crtc_state->do_async_flip &&
		!new_crtc_state->vrr.enable &&
		!new_crtc_state->has_psr &&
		!intel_crtc_needs_modeset(new_crtc_state) &&
		!intel_crtc_needs_fastset(new_crtc_state) &&
		!intel_crtc_needs_color_update(new_crtc_state);

	new_crtc_state->use_dsb =
		!new_crtc_state->use_flipq &&
		!new_crtc_state->do_async_flip &&
		(DISPLAY_VER(display) >= 20 || !new_crtc_state->has_psr) &&
		!intel_crtc_needs_modeset(new_crtc_state) &&
		!intel_crtc_needs_fastset(new_crtc_state);

	intel_color_prepare_commit(state, crtc);
}

/*
 * Build the commit DSB: record the whole noarm/arm plane + color update
 * sequence into new_crtc_state->dsb_commit so the hardware can replay it.
 */
static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->use_flipq &&
	    !new_crtc_state->use_dsb &&
	    !new_crtc_state->dsb_color)
		return;

	/*
	 * Rough estimate:
	 * ~64 registers per each plane * 8 planes = 512
	 * Double that for pipe stuff and other overhead.
	 */
	new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0,
						       new_crtc_state->use_dsb ||
						       new_crtc_state->use_flipq ? 1024 : 16);
	if (!new_crtc_state->dsb_commit) {
		/* fall back to the mmio path if DSB allocation failed */
		new_crtc_state->use_flipq = false;
		new_crtc_state->use_dsb = false;
		intel_color_cleanup_commit(new_crtc_state);
		return;
	}

	if (new_crtc_state->use_flipq || new_crtc_state->use_dsb) {
		/* Wa_18034343758 */
		if (new_crtc_state->use_flipq)
			intel_flipq_wait_dmc_halt(new_crtc_state->dsb_commit, crtc);

		if (intel_crtc_needs_color_update(new_crtc_state))
			intel_color_commit_noarm(new_crtc_state->dsb_commit,
						 new_crtc_state);
		intel_crtc_planes_update_noarm(new_crtc_state->dsb_commit,
					       state, crtc);

		/*
		 * Ensure we have "Frame Change" event when PSR state is
		 * SRDENT(PSR1) or DEEP_SLEEP(PSR2). Otherwise DSB vblank
		 * evasion hangs as PIPEDSL is reading as 0.
		 */
		intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit,
						     state, crtc);

		intel_psr_wait_for_idle_dsb(new_crtc_state->dsb_commit,
					    new_crtc_state);

		if (new_crtc_state->use_dsb)
			intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);

		if (intel_crtc_needs_color_update(new_crtc_state))
			intel_color_commit_arm(new_crtc_state->dsb_commit,
					       new_crtc_state);
		bdw_set_pipe_misc(new_crtc_state->dsb_commit,
				  new_crtc_state);
		intel_psr2_program_trans_man_trk_ctl(new_crtc_state->dsb_commit,
						     new_crtc_state);
		intel_crtc_planes_update_arm(new_crtc_state->dsb_commit,
					     state, crtc);

		if (DISPLAY_VER(display) >= 9)
			skl_detach_scalers(new_crtc_state->dsb_commit,
					   new_crtc_state);

		/* Wa_18034343758 */
		if (new_crtc_state->use_flipq)
			intel_flipq_unhalt_dmc(new_crtc_state->dsb_commit, crtc);
	}

	/* hook the color DSB to the commit DSB, either chained or as a subroutine */
	if (intel_color_uses_chained_dsb(new_crtc_state))
		intel_dsb_chain(state, new_crtc_state->dsb_commit,
				new_crtc_state->dsb_color, true);
	else if (intel_color_uses_gosub_dsb(new_crtc_state))
		intel_dsb_gosub(new_crtc_state->dsb_commit,
				new_crtc_state->dsb_color);

	if (new_crtc_state->use_dsb && !intel_color_uses_chained_dsb(new_crtc_state)) {
		intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);

		intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
		intel_dsb_wait_for_delayed_vblank(state, new_crtc_state->dsb_commit);
		intel_vrr_check_push_sent(new_crtc_state->dsb_commit,
					  new_crtc_state);
		intel_dsb_interrupt(new_crtc_state->dsb_commit);
	}

	intel_dsb_finish(new_crtc_state->dsb_commit);
}

/*
 * The guts of an atomic commit: performs the entire modeset/fastset/flip
 * sequence (disables, cdclk/dbuf/watermark updates, enables, event
 * delivery) and defers final cleanup to a worker.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = NULL;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_atomic_dsb_prepare(state, crtc);

	intel_atomic_commit_fence_wait(state);

	intel_td_flush(display);

	intel_atomic_prepare_plane_clear_colors(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_fbc_prepare_dirty_rect(state, crtc);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_atomic_dsb_finish(state, crtc);

	drm_atomic_helper_wait_for_dependencies(&state->base);
	drm_dp_mst_atomic_wait_for_dependencies(&state->base);
	intel_atomic_global_state_wait_for_dependencies(state);

	/*
	 * During full modesets we write a lot of registers, wait
	 * for PLLs, etc. Doing that while DC states are enabled
	 * is not a good idea.
	 *
	 * During fastsets and other updates we also need to
	 * disable DC states due to the following scenario:
	 * 1. DC5 exit and PSR exit happen
	 * 2. Some or all _noarm() registers are written
	 * 3. Due to some long delay PSR is re-entered
	 * 4. DC5 entry -> DMC saves the already written new
	 *    _noarm() registers and the old not yet written
	 *    _arm() registers
	 * 5. DC5 exit -> DMC restores a mixture of old and
	 *    new register values and arms the update
	 * 6. PSR exit -> hardware latches a mixture of old and
	 *    new register values -> corrupted frame, or worse
	 * 7. New _arm() registers are finally written
	 * 8. Hardware finally latches a complete set of new
	 *    register values, and subsequent frames will be OK again
	 *
	 * Also note that due to the pipe CSC hardware issues on
	 * SKL/GLK DC states must remain off until the pipe CSC
	 * state readout has happened. Otherwise we risk corrupting
	 * the CSC latched register values with the readout (see
	 * skl_read_csc() and skl_color_commit_noarm()).
	 */
	wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    intel_crtc_needs_fastset(new_crtc_state))
			intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
	}

	intel_commit_modeset_disables(state);

	intel_dp_tunnel_atomic_alloc_bw(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	/*
	 * In XE_LPD+ Pmdemand combines many parameters such as voltage index,
	 * plls, cdclk frequency, QGV point selection parameter etc. Voltage
	 * index, cdclk/ddiclk frequencies are supposed to be configured before
	 * the cdclk config is set.
	 */
	intel_pmdemand_pre_plane_update(state);

	if (state->modeset)
		drm_atomic_helper_update_legacy_modeset_state(display->drm, &state->base);

	intel_set_cdclk_pre_plane_update(state);

	if (state->modeset)
		intel_modeset_verify_disabled(state);

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now disabled pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&display->drm->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&display->drm->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	display->funcs.display->commit_modeset_enables(state);

	/* FIXME probably need to sequence this properly */
	intel_program_dpkgc_latency(state);

	intel_wait_for_vblank_workers(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(display->drm, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_disable_flip_done(state, crtc);

		intel_atomic_dsb_wait_commit(new_crtc_state);

		if (!state->base.legacy_cursor_update && !new_crtc_state->use_dsb)
			intel_vrr_check_push_sent(NULL, new_crtc_state);

		if (new_crtc_state->use_flipq)
			intel_flipq_disable(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (DISPLAY_VER(display) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, true);

		intel_optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(state, crtc);

		intel_post_plane_update_after_readout(state, crtc);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 *
		 * FIXME get rid of this funny new->old swapping
		 */
		old_crtc_state->dsb_color = fetch_and_zero(&new_crtc_state->dsb_color);
		old_crtc_state->dsb_commit = fetch_and_zero(&new_crtc_state->dsb_commit);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(display);
	intel_check_pch_fifo_underruns(display);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);
	intel_set_cdclk_post_plane_update(state);
	intel_pmdemand_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);
	intel_atomic_global_state_commit_done(state);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
	}
	/*
	 * Delay re-enabling DC states by 17 ms to avoid the off->on->off
	 * toggling overhead at and above 60 FPS.
	 */
	intel_display_power_put_async_delay(display, POWER_DOMAIN_DC_OFF, wakeref, 17);
	intel_display_rpm_put(display, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->cleanup_work, intel_atomic_cleanup_work);
	queue_work(display->wq.cleanup, &state->cleanup_work);
}

/* Work item wrapper for nonblocking commits: runs the commit tail. */
static void intel_atomic_commit_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);

	intel_atomic_commit_tail(state);
}

/* Transfer frontbuffer tracking from the old to the new plane fbs. */
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}

/* Set up both the drm core commit tracking and i915's global state commit. */
static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock)
{
	int ret;

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (ret)
		return ret;

	ret = intel_atomic_global_state_setup_commit(state);
	if (ret)
		return ret;

	return 0;
}

/* Swap new<->old for drm core, global, and dpll state; update fb tracking. */
static int intel_atomic_swap_state(struct intel_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_swap_state(&state->base, true);
	if (ret)
		return ret;

	intel_atomic_swap_global_state(state);

	intel_dpll_swap_state(state);

	intel_atomic_track_fbs(state);

	return 0;
}

/*
 * The drm_mode_config_funcs.atomic_commit hook: prepares, swaps, and
 * either queues (nonblocking) or directly runs the commit tail.
 */
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
			bool nonblock)
{
	struct intel_display *display = to_intel_display(dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret = 0;

	state->wakeref = intel_display_rpm_get(display);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(display) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(display->drm,
			       "Preparing state failed with %i\n", ret);
		intel_display_rpm_put(display, state->wakeref);
		return ret;
	}

	ret = intel_atomic_setup_commit(state, nonblock);
	if (!ret)
		ret = intel_atomic_swap_state(state);

	if (ret) {
		drm_atomic_helper_unprepare_planes(dev, &state->base);
		intel_display_rpm_put(display, state->wakeref);
		return ret;
	}

	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	if (nonblock && state->modeset) {
		queue_work(display->wq.modeset, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(display->wq.flip, &state->base.commit_work);
	} else {
		/* blocking commit: run inline, but after any queued modesets */
		if (state->modeset)
			flush_workqueue(display->wq.modeset);
		intel_atomic_commit_tail(state);
	}

	return 0;
}

/* Mask of encoders that can be cloned (active simultaneously) with @encoder. */
static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *source_encoder;
	u32 possible_clones = 0;

	for_each_intel_encoder(display->drm, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			possible_clones |= drm_encoder_mask(&source_encoder->base);
	}

	return possible_clones;
}

/* Mask of crtcs this encoder can be driven by, from its pipe_mask. */
static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_crtc *crtc;
	u32 possible_crtcs = 0;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, encoder->pipe_mask)
		possible_crtcs |= drm_crtc_mask(&crtc->base);

	return possible_crtcs;
}

/* Is eDP on port A present on this ILK-era platform (strap + fuse check)? */
static bool ilk_has_edp_a(struct intel_display *display)
{
	if (!display->platform.mobile)
		return false;

	if ((intel_de_read(display, DP_A) & DP_DETECTED) == 0)
		return false;

	if (display->platform.ironlake && (intel_de_read(display, FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/* Is a CRT (VGA) output present on this HSW/BDW DDI platform? */
static bool intel_ddi_crt_present(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 9)
		return false;

	if (display->platform.haswell_ult || display->platform.broadwell_ult)
		return false;

	if (HAS_PCH_LPT_H(display) &&
	    intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!display->vbt.int_crt_support)
		return false;

	return true;
}

/* WARN (and return false) if @port is not in the platform's port mask. */
bool assert_port_valid(struct intel_display *display, enum port port)
{
	return !drm_WARN(display->drm, !(DISPLAY_RUNTIME_INFO(display)->port_mask & BIT(port)),
			 "Platform does not support port %c\n", port_name(port));
}

/*
 * Probe and register all display outputs (encoders/connectors) for the
 * platform, using hardware straps, fuses, and VBT information to decide
 * which ports are present.
 */
void intel_setup_outputs(struct intel_display *display)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(display);

	if (!HAS_DISPLAY(display))
		return;

	if (HAS_DDI(display)) {
		if (intel_ddi_crt_present(display))
			intel_crt_init(display);

		intel_bios_for_each_encoder(display, intel_ddi_init);

		if (display->platform.geminilake || display->platform.broxton)
			vlv_dsi_init(display);
	} else if (HAS_PCH_SPLIT(display)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(display);
		intel_crt_init(display);

		dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);

		if (ilk_has_edp_a(display))
			g4x_dp_init(display, DP_A, PORT_A);

		if (intel_de_read(display, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(display, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(display, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(display, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(display, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(display, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(display, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(display, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(display, PCH_HDMID, PORT_D);

		if (intel_de_read(display, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(display, PCH_DP_C, PORT_C);

		if (intel_de_read(display, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(display, PCH_DP_D, PORT_D);
	} else if (display->platform.valleyview || display->platform.cherryview) {
		bool has_edp, has_port;

		if (display->platform.valleyview && display->vbt.int_crt_support)
			intel_crt_init(display);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(display, PORT_B);
		has_port = intel_bios_is_port_present(display, PORT_B);
		if (intel_de_read(display, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(display, VLV_DP_B, PORT_B);
		if ((intel_de_read(display, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(display, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(display, PORT_C);
		has_port = intel_bios_is_port_present(display, PORT_C);
		if (intel_de_read(display, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(display, VLV_DP_C, PORT_C);
		if ((intel_de_read(display, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(display, VLV_HDMIC, PORT_C);

		if (display->platform.cherryview) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(display, PORT_D);
			if (intel_de_read(display, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(display, CHV_DP_D, PORT_D);
			if (intel_de_read(display, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(display, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(display);
	} else if (display->platform.pineview) {
		intel_lvds_init(display);
		intel_crt_init(display);
	} else if (IS_DISPLAY_VER(display, 3, 4)) {
		bool found = false;

		if (display->platform.mobile)
			intel_lvds_init(display);

		intel_crt_init(display);

		if (intel_de_read(display, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(display->drm, "probing SDVOB\n");
			found = intel_sdvo_init(display, GEN3_SDVOB, PORT_B);
			if (!found && display->platform.g4x) {
				drm_dbg_kms(display->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(display, GEN4_HDMIB, PORT_B);
			}

			if (!found && display->platform.g4x)
				g4x_dp_init(display, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(display, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(display->drm, "probing SDVOC\n");
			found = intel_sdvo_init(display, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(display, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (display->platform.g4x) {
				drm_dbg_kms(display->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(display, GEN4_HDMIC, PORT_C);
			}
			if (display->platform.g4x)
				g4x_dp_init(display, DP_C, PORT_C);
		}

		if (display->platform.g4x && (intel_de_read(display, DP_D) & DP_DETECTED))
			g4x_dp_init(display, DP_D, PORT_D);

		if (SUPPORTS_TV(display))
			intel_tv_init(display);
	} else if (DISPLAY_VER(display) == 2) {
		if (display->platform.i85x)
			intel_lvds_init(display);

		intel_crt_init(display);
		intel_dvo_init(display);
	}

	/* now that all encoders exist, fill in their crtc/clone masks */
	for_each_intel_encoder(display->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(display);

	drm_helper_move_panel_connectors_to_head(display->drm);
}

/*
 * Highest pixel clock we can accept during mode validation. Pipe joining
 * splits a single mode across multiple pipes, so the per-pipe max dotclock
 * is multiplied accordingly (x4 for ultrajoiner, x2 for big/uncompressed
 * joiner).
 */
static int max_dotclock(struct intel_display *display)
{
	int max_dotclock = display->cdclk.max_dotclk_freq;

	if (HAS_ULTRAJOINER(display))
		max_dotclock *= 4;
	else if (HAS_UNCOMPRESSED_JOINER(display) || HAS_BIGJOINER(display))
		max_dotclock *= 2;

	return max_dotclock;
}

/*
 * Device-wide mode validation: reject mode flags the hardware can't
 * handle and enforce the per-generation transcoder timing register
 * limits. Connector/encoder specific limits are checked elsewhere.
 */
enum drm_mode_status intel_mode_valid(struct drm_device *dev,
				      const struct drm_display_mode *mode)
{
	struct intel_display *display = to_intel_display(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	/* Composite sync variants are not supported */
	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/*
	 * Reject clearly excessive dotclocks early to
	 * avoid having to worry about huge integers later.
	 */
	if (mode->clock > max_dotclock(display))
		return MODE_CLOCK_HIGH;

	/* Transcoder timing limits */
	if (DISPLAY_VER(display) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(display) >= 9 ||
		   display->platform.broadwell || display->platform.haswell) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(display) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	/* sync start/end must also fit in the htotal/vtotal register fields */
	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	/*
	 * WM_LINETIME only goes up to (almost) 64 usec, and also
	 * knowing that the linetime is always bounded will ease the
	 * mind during various calculations.
	 */
	if (DIV_ROUND_UP(mode->htotal * 1000, mode->clock) > 64)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

/*
 * Extra timing checks for modes driven through a CPU transcoder:
 * minimum active width and minimum horizontal/vertical blanking.
 */
enum drm_mode_status intel_cpu_transcoder_mode_valid(struct intel_display *display,
						     const struct drm_display_mode *mode)
{
	/*
	 * Additional transcoder timing limits,
	 * excluding BXT/GLK DSI transcoders.
	 */
	if (DISPLAY_VER(display) >= 5) {
		/*
		 * NOTE(review): the 64 pixel minimum hactive and the
		 * 32/5 minimum blanking look like hardware limits from
		 * Bspec — confirm before relaxing.
		 */
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(display) >= 5 || display->platform.g4x) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

/*
 * Reject modes larger than the biggest plane the hardware can scan out;
 * on display ver 11+ the width limit scales with the number of joined
 * pipes.
 */
enum drm_mode_status
intel_mode_valid_max_plane_size(struct intel_display *display,
				const struct drm_display_mode *mode,
				int num_joined_pipes)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (DISPLAY_VER(display) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertize modes that are
	 * too big for that.
	 */
	if (DISPLAY_VER(display) >= 30) {
		plane_width_max = 6144 * num_joined_pipes;
		plane_height_max = 4800;
	} else if (DISPLAY_VER(display) >= 11) {
		plane_width_max = 5120 * num_joined_pipes;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

/* Display version 9+ (see intel_init_display_hooks() below) */
static const struct intel_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
	.fixup_initial_plane_config = skl_fixup_initial_plane_config,
};

/* Pre-gen9 DDI platforms (HAS_DDI) */
static const struct intel_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/* Non-DDI PCH split platforms (HAS_PCH_SPLIT) */
static const struct intel_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/* VLV/CHV */
static const struct intel_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/* Everything older */
static const struct intel_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @display: display device private
 *
 * Selects the platform appropriate vtable of pipe config readout and
 * crtc enable/disable functions. Order matters: newer capability
 * checks must come first since e.g. gen9+ also has DDI.
 */
void intel_init_display_hooks(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 9) {
		display->funcs.display = &skl_display_funcs;
	} else if (HAS_DDI(display)) {
		display->funcs.display = &ddi_display_funcs;
	} else if (HAS_PCH_SPLIT(display)) {
		display->funcs.display = &pch_split_display_funcs;
	} else if (display->platform.cherryview ||
		   display->platform.valleyview) {
		display->funcs.display = &vlv_display_funcs;
	} else {
		display->funcs.display = &i9xx_display_funcs;
	}
}

/*
 * Commit the state inherited from the BIOS/GOP once at init: inactive
 * crtcs drop their "inherited" flag, and for active crtcs the planes
 * (and, if an encoder's initial_fastset_check rejects the inherited
 * state, the connectors) are added to the commit so it can be fixed up.
 * Uses the standard drm_modeset_backoff() retry dance on -EDEADLK.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_initial_commit(struct intel_display *display)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(display->drm);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	for_each_intel_crtc(display->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (!crtc_state->hw.active)
			crtc_state->inherited = false;

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(display->drm, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

/*
 * Force a pipe on with fixed 640x480@60 timings. Used by the "force
 * pipe on" quirk handling (see the drm_dbg_kms below); counterpart of
 * i830_disable_pipe().
 */
void i830_enable_pipe(struct intel_display *display, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	/* no dedicated transcoder enum on these platforms: pipe == transcoder */
	enum transcoder cpu_transcoder = (enum transcoder)pipe;
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* sanity check that the hardcoded dividers produce the expected dotclock */
	drm_WARN_ON(display->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(display->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	/* program the 640x480 timings (all registers are 0 based) */
	intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder),
		       HACTIVE(640 - 1) | HTOTAL(800 - 1));
	intel_de_write(display, TRANS_HBLANK(display, cpu_transcoder),
		       HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
	intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder),
		       HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
	intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
		       VACTIVE(480 - 1) | VTOTAL(525 - 1));
	intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
		       VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
	intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder),
		       VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
	intel_de_write(display, PIPESRC(display, pipe),
		       PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));

	intel_de_write(display, FP0(pipe), fp);
	intel_de_write(display, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(display, DPLL(display, pipe),
		       dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(display, DPLL(display, pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(display, DPLL(display, pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(display, DPLL(display, pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(display, DPLL(display, pipe), dpll);
		intel_de_posting_read(display, DPLL(display, pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(display, TRANSCONF(display, pipe), TRANSCONF_ENABLE);
	intel_de_posting_read(display, TRANSCONF(display, pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}

/*
 * Turn a force-quirk-enabled pipe (see i830_enable_pipe()) back off.
 * WARNs if any primary plane or cursor is still enabled, then disables
 * the transcoder, waits for the scanline to stop, and parks the DPLL
 * in VGA mode.
 */
void i830_disable_pipe(struct intel_display *display, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

	drm_dbg_kms(display->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(display->drm,
		    intel_de_read(display, DSPCNTR(display, PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, DSPCNTR(display, PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, DSPCNTR(display, PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, CURCNTR(display, PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, CURCNTR(display, PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(display, TRANSCONF(display, pipe), 0);
	intel_de_posting_read(display, TRANSCONF(display, pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(display, DPLL(display, pipe));
}

/*
 * True when scanout needs the VT-d workaround: display version 6..11
 * with DMA remapping active.
 */
bool intel_scanout_needs_vtd_wa(struct intel_display *display)
{
	return IS_DISPLAY_VER(display, 6, 11) && intel_display_vtd_active(display);
}