1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 */ 26 27 #include <linux/dma-resv.h> 28 #include <linux/i2c.h> 29 #include <linux/input.h> 30 #include <linux/kernel.h> 31 #include <linux/module.h> 32 #include <linux/slab.h> 33 #include <linux/string_helpers.h> 34 35 #include <drm/display/drm_dp_helper.h> 36 #include <drm/display/drm_dp_tunnel.h> 37 #include <drm/drm_atomic.h> 38 #include <drm/drm_atomic_helper.h> 39 #include <drm/drm_atomic_uapi.h> 40 #include <drm/drm_damage_helper.h> 41 #include <drm/drm_edid.h> 42 #include <drm/drm_fixed.h> 43 #include <drm/drm_fourcc.h> 44 #include <drm/drm_print.h> 45 #include <drm/drm_probe_helper.h> 46 #include <drm/drm_rect.h> 47 #include <drm/drm_vblank.h> 48 49 #include "g4x_dp.h" 50 #include "g4x_hdmi.h" 51 #include "hsw_ips.h" 52 #include "i915_config.h" 53 #include "i915_reg.h" 54 #include "i9xx_plane.h" 55 #include "i9xx_plane_regs.h" 56 #include "i9xx_wm.h" 57 #include "intel_alpm.h" 58 #include "intel_atomic.h" 59 #include "intel_audio.h" 60 #include "intel_bo.h" 61 #include "intel_bw.h" 62 #include "intel_casf.h" 63 #include "intel_cdclk.h" 64 #include "intel_clock_gating.h" 65 #include "intel_color.h" 66 #include "intel_crt.h" 67 #include "intel_crtc.h" 68 #include "intel_crtc_state_dump.h" 69 #include "intel_cursor.h" 70 #include "intel_cursor_regs.h" 71 #include "intel_cx0_phy.h" 72 #include "intel_ddi.h" 73 #include "intel_de.h" 74 #include "intel_display_driver.h" 75 #include "intel_display_power.h" 76 #include "intel_display_regs.h" 77 #include "intel_display_rpm.h" 78 #include "intel_display_types.h" 79 #include "intel_display_utils.h" 80 #include "intel_display_wa.h" 81 #include "intel_dmc.h" 82 #include "intel_dp.h" 83 #include "intel_dp_link_training.h" 84 #include "intel_dp_mst.h" 85 #include "intel_dp_tunnel.h" 86 #include "intel_dpll.h" 87 #include "intel_dpll_mgr.h" 88 #include "intel_dpt.h" 89 #include "intel_dpt_common.h" 90 #include "intel_drrs.h" 91 #include "intel_dsb.h" 92 #include 
"intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_flipq.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_initial_plane.h"
#include "intel_link_bw.h"
#include "intel_lt_phy.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pfit.h"
#include "intel_pipe_crc.h"
#include "intel_plane.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_tdf.h"
#include "intel_tv.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
#include "intel_vga.h"
#include "intel_vrr.h"
#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"

/* Forward declarations for helpers defined later in this file. */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
			      const struct intel_crtc_state *crtc_state);

/*
 * True when only HDR-capable planes (and possibly the cursor) are
 * active on the pipe, i.e. no SDR-only plane is in use.
 */
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct intel_display *display, enum pipe pipe, bool enable)
{
	/* Toggle the DUPS1/DUPS2 clock gating disable bits for this pipe. */
	intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
		     DUPS1_GATING_DIS | DUPS2_GATING_DIS,
		     enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct intel_display *display, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
		     DPFR_GATING_DIS,
		     enable ? DPFR_GATING_DIS : 0);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct intel_display *display, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
		     CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

/* A port sync slave has a valid master transcoder assigned. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

/* A port sync master has at least one slave transcoder in its mask. */
bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

/* True if the transcoder participates in port sync as master or slave. */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

/* Lowest pipe set in joiner_pipes acts as the joiner primary. */
static enum pipe joiner_primary_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->joiner_pipes) - 1;
}

/*
 * The following helper functions, despite being named for bigjoiner,
 * are applicable to both bigjoiner and uncompressed joiner configurations.
 */
static bool is_bigjoiner(const struct intel_crtc_state *crtc_state)
{
	/* Joining at least two pipes implies big/uncompressed joiner use. */
	return hweight8(crtc_state->joiner_pipes) >= 2;
}

/* Pipes acting as bigjoiner primaries: every second pipe from the primary. */
static u8 bigjoiner_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!is_bigjoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b01010101 << joiner_primary_pipe(crtc_state));
}

/* Pipes acting as bigjoiner secondaries: the pipe paired with each primary. */
static unsigned int bigjoiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!is_bigjoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b10101010 << joiner_primary_pipe(crtc_state));
}

bool intel_crtc_is_bigjoiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return false;

	return BIT(crtc->pipe) & bigjoiner_primary_pipes(crtc_state);
}

bool intel_crtc_is_bigjoiner_secondary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return false;

	return BIT(crtc->pipe) & bigjoiner_secondary_pipes(crtc_state);
}

/* Pipes to treat as modeset "primaries"; just this pipe when not joined. */
u8 _intel_modeset_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!is_bigjoiner(crtc_state))
		return BIT(crtc->pipe);

	return bigjoiner_primary_pipes(crtc_state);
}

u8 _intel_modeset_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	return bigjoiner_secondary_pipes(crtc_state);
}

/* Ultrajoiner chains four or more pipes together. */
bool intel_crtc_is_ultrajoiner(const struct intel_crtc_state *crtc_state)
{
	return intel_crtc_num_joined_pipes(crtc_state) >= 4;
}

/* Every fourth pipe starting at the primary acts as an ultrajoiner primary. */
static u8 ultrajoiner_primary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!intel_crtc_is_ultrajoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes &
		(0b00010001 << joiner_primary_pipe(crtc_state));
}

bool intel_crtc_is_ultrajoiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return intel_crtc_is_ultrajoiner(crtc_state) &&
	       BIT(crtc->pipe) & ultrajoiner_primary_pipes(crtc_state);
}

/*
 * The ultrajoiner enable bit doesn't seem to follow primary/secondary logic or
 * any other logic, so lets just add helper function to
 * at least hide this hassle..
 */
static u8 ultrajoiner_enable_pipes(const struct intel_crtc_state *crtc_state)
{
	if (!intel_crtc_is_ultrajoiner(crtc_state))
		return 0;

	return crtc_state->joiner_pipes & (0b01110111 << joiner_primary_pipe(crtc_state));
}

bool intel_crtc_ultrajoiner_enable_needed(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return intel_crtc_is_ultrajoiner(crtc_state) &&
	       BIT(crtc->pipe) & ultrajoiner_enable_pipes(crtc_state);
}

/* All joined pipes except the primary. */
u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->joiner_pipes)
		return crtc_state->joiner_pipes & ~BIT(joiner_primary_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_joiner_secondary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe != joiner_primary_pipe(crtc_state);
}

bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe == joiner_primary_pipe(crtc_state);
}

/* Number of pipes driven together for this crtc, including itself. */
int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(intel_crtc_joined_pipe_mask(crtc_state));
}

u8
intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	/* This pipe plus all other pipes joined with it. */
	return BIT(crtc->pipe) | crtc_state->joiner_pipes;
}

/* Resolve the primary crtc of a (possibly joined) crtc state. */
struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (intel_crtc_is_joiner_secondary(crtc_state))
		return intel_crtc_for_pipe(display, joiner_primary_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(old_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);

	if (DISPLAY_VER(display) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear_ms(display, TRANSCONF(display, cpu_transcoder),
					       TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(display->drm, 1, "pipe_off wait timed out\n");
	} else {
		/* Pre-gen4 has no pipe state bit; watch the scanline instead. */
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Warn if the transcoder's hardware enable state doesn't match @state. */
void assert_transcoder(struct intel_display *display,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	struct ref_tracker *wakeref;

	/* we keep both pipes enabled on 830 */
	if (display->platform.i830)
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(display, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(display,
					TRANSCONF(display, cpu_transcoder));
		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(display, power_domain, wakeref);
	} else {
		/* Power domain off implies the transcoder is disabled. */
		cur_state = false;
	}

	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "transcoder %s assertion failure (expected %s, current %s)\n",
				 transcoder_name(cpu_transcoder), str_on_off(state),
				 str_on_off(cur_state));
}

/* Warn if the plane's hardware enable state doesn't match @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
	struct intel_display *display = to_intel_display(plane->base.dev);
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "%s assertion failure (expected %s, current %s)\n",
				 plane->base.name, str_on_off(state),
				 str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(display->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(display->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(display)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(display);
		else
			assert_pll_enabled(display, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(display,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(display,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(display) == 13)
		intel_de_rmw(display, PIPE_ARB_CTL(display, pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	if (DISPLAY_VER(display) >= 14) {
		u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
		u32 set = 0;

		/* FEC BS jitter workaround is only needed on display ver 14 */
		if (DISPLAY_VER(display) == 14)
			set |= DP_FEC_BS_JITTER_WA;

		intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
			     clear, set);
	}

	val = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(display->drm, !display->platform.i830);
		return;
	}

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(display) >= 13 &&
	    new_crtc_state->dsc.compression_enable) {
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
		val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
				      TRANSCONF_PIXEL_COUNT_SCALING_X4);
	}

	intel_de_write(display, TRANSCONF(display, cpu_transcoder),
		       val | TRANSCONF_ENABLE);
	intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(old_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(display->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	val = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!display->platform.i830)
		val &= ~TRANSCONF_ENABLE;

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(display) >= 13 &&
	    old_crtc_state->dsc.compression_enable)
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;

	intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);

	if (DISPLAY_VER(display) >= 12)
		intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	/* Only wait for the pipe to go off if we actually disabled it. */
	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

/*
 * Maximum framebuffer stride any plane supports for the given
 * format/modifier, or 0 when no crtc is available.
 */
u32 intel_plane_fb_max_stride(struct intel_display *display,
			      const struct drm_format_info *info,
			      u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all,
	 * if in case pipe A is disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_first_crtc(display);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, info, modifier,
				 DRM_MODE_ROTATE_0);
}

/* Max stride for dumb buffers; 0 when the device has no display. */
u32 intel_dumb_fb_max_stride(struct drm_device *drm,
			     u32 pixel_format, u64 modifier)
{
	struct intel_display *display = to_intel_display(drm);

	if (!HAS_DISPLAY(display))
		return 0;

	return intel_plane_fb_max_stride(display,
					 drm_get_format_info(drm, pixel_format, modifier),
					 modifier);
}

/* Update plane visibility and the crtc's uapi plane_mask to match. */
void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, display->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

/* Disable a plane outside of an atomic commit (boot/takeover paths). */
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(display->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_plane_set_invisible(crtc_state, plane_state);
	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);

	skl_wm_plane_disable_noatomic(crtc, plane);

	/* If only the cursor remains, turn IPS off and wait a vblank. */
	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_initial_plane_vblank_wait(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(display) &&
	    intel_set_memory_cxsr(display, false))
		intel_initial_plane_vblank_wait(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(display) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, false);

	intel_plane_disable_arm(NULL, plane, crtc_state);
	intel_initial_plane_vblank_wait(crtc);
}

/* Y offset within the fence-aligned view of the first color plane. */
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}

static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(display, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (display->platform.dg2)
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if ((DISPLAY_VER(display) >= 13) && (DISPLAY_VER(display) < 30))
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (display->platform.dg2)
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(display, PIPE_CHICKEN(pipe), tmp);
}

/* True if any crtc still has a commit whose cleanup hasn't completed. */
bool intel_has_pending_fb_unpin(struct intel_display *display)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, display->drm) {
		struct drm_crtc_commit *commit;
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		/* Give the pending commit a vblank to finish its cleanup. */
		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *primary_crtc;
	int num_encoders = 0;
	int i;

	primary_crtc = intel_primary_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &primary_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(state->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(primary_crtc->pipe));

	return encoder;
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

/* WA Display #0827 applies when NV12 planes are active on Gen9. */
static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(display) == 9)
		return true;

	return false;
}

/* Scaler clock gating WA applies when any scaler is in use on ICL. */
static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(display) == 11)
		return true;

	return false;
}

/* Cursor clock gating WA applies in HDR mode with an active cursor on ICL. */
static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(display) == 11)
		return true;

	return false;
}

static void intel_async_flip_vtd_wa(struct intel_display *display,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(display) == 9) {
		/*
		 * "Plane N stretch max must be programmed to 11b (x1)
		 * when Async flips are enabled on that plane."
		 */
		intel_de_rmw(display, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(display, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}

/* Stretch-max WA needed for async flips with VT-d active on SKL/BDW/HSW. */
static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	return crtc_state->uapi.async_flip && intel_display_vtd_active(display) &&
		(DISPLAY_VER(display) == 9 || display->platform.broadwell ||
		 display->platform.haswell);
}

/* Call audio_enable() on every encoder newly bound to this crtc. */
static void intel_encoders_audio_enable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_enable)
			encoder->audio_enable(encoder, crtc_state, conn_state);
	}
}

/* Call audio_disable() on every encoder previously bound to this crtc. */
static void intel_encoders_audio_disable(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_disable)
			encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
	}
}

/* Feature turns on in this commit (or the crtc undergoes a full modeset). */
#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
/* Feature turns off in this commit (or the crtc undergoes a full modeset). */
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))

static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

/* Any change to the VRR timing parameters between the two states? */
static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline ||
		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full ||
		old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start ||
		old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end;
}

/* Any change to the CMRR M/N values between the two states? */
static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
				const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->cmrr.cmrr_m != new_crtc_state->cmrr.cmrr_m ||
		old_crtc_state->cmrr.cmrr_n != new_crtc_state->cmrr.cmrr_n;
}

/*
 * VRR needs (re-)enabling: either it's being turned on, or it stays on
 * while its parameters change.
 */
static bool intel_crtc_vrr_enabling(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

/*
 * VRR needs disabling: either it's being turned off, or it was on and
 * its parameters change (requiring a disable/re-enable cycle).
 */
bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

/* Audio starts, or stays on with a changed ELD (needs re-enable). */
static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
			   const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

/* Audio stops, or was on with a changed ELD (needs disable first). */
static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

static bool intel_casf_enabling(const struct intel_crtc_state *new_crtc_state,
				const struct intel_crtc_state *old_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state);
}

static bool intel_casf_disabling(const struct intel_crtc_state *old_crtc_state,
				 const struct intel_crtc_state *new_crtc_state)
{
	if
(!new_crtc_state->hw.active) 1006 return false; 1007 1008 return is_disabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state); 1009 } 1010 1011 #undef is_disabling 1012 #undef is_enabling 1013 1014 static void intel_post_plane_update(struct intel_atomic_state *state, 1015 struct intel_crtc *crtc) 1016 { 1017 struct intel_display *display = to_intel_display(state); 1018 const struct intel_crtc_state *old_crtc_state = 1019 intel_atomic_get_old_crtc_state(state, crtc); 1020 const struct intel_crtc_state *new_crtc_state = 1021 intel_atomic_get_new_crtc_state(state, crtc); 1022 enum pipe pipe = crtc->pipe; 1023 1024 intel_frontbuffer_flip(display, new_crtc_state->fb_bits); 1025 1026 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active) 1027 intel_update_watermarks(display); 1028 1029 intel_fbc_post_update(state, crtc); 1030 1031 if (needs_async_flip_vtd_wa(old_crtc_state) && 1032 !needs_async_flip_vtd_wa(new_crtc_state)) 1033 intel_async_flip_vtd_wa(display, pipe, false); 1034 1035 if (needs_nv12_wa(old_crtc_state) && 1036 !needs_nv12_wa(new_crtc_state)) 1037 skl_wa_827(display, pipe, false); 1038 1039 if (needs_scalerclk_wa(old_crtc_state) && 1040 !needs_scalerclk_wa(new_crtc_state)) 1041 icl_wa_scalerclkgating(display, pipe, false); 1042 1043 if (needs_cursorclk_wa(old_crtc_state) && 1044 !needs_cursorclk_wa(new_crtc_state)) 1045 icl_wa_cursorclkgating(display, pipe, false); 1046 1047 if (intel_crtc_needs_color_update(new_crtc_state)) 1048 intel_color_post_update(new_crtc_state); 1049 1050 if (audio_enabling(old_crtc_state, new_crtc_state)) 1051 intel_encoders_audio_enable(state, crtc); 1052 1053 if (intel_display_wa(display, 14011503117)) { 1054 if (old_crtc_state->pch_pfit.enabled != new_crtc_state->pch_pfit.enabled) 1055 adl_scaler_ecc_unmask(new_crtc_state); 1056 } 1057 1058 intel_alpm_post_plane_update(state, crtc); 1059 1060 intel_psr_post_plane_update(state, crtc); 1061 } 1062 1063 static void intel_post_plane_update_after_readout(struct 
intel_atomic_state *state, 1064 struct intel_crtc *crtc) 1065 { 1066 const struct intel_crtc_state *new_crtc_state = 1067 intel_atomic_get_new_crtc_state(state, crtc); 1068 1069 /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */ 1070 hsw_ips_post_update(state, crtc); 1071 1072 /* 1073 * Activate DRRS after state readout to avoid 1074 * dp_m_n vs. dp_m2_n2 confusion on BDW+. 1075 */ 1076 intel_drrs_activate(new_crtc_state); 1077 } 1078 1079 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, 1080 struct intel_crtc *crtc) 1081 { 1082 const struct intel_crtc_state *crtc_state = 1083 intel_atomic_get_new_crtc_state(state, crtc); 1084 u8 update_planes = crtc_state->update_planes; 1085 const struct intel_plane_state __maybe_unused *plane_state; 1086 struct intel_plane *plane; 1087 int i; 1088 1089 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 1090 if (plane->pipe == crtc->pipe && 1091 update_planes & BIT(plane->id)) 1092 plane->enable_flip_done(plane); 1093 } 1094 } 1095 1096 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state, 1097 struct intel_crtc *crtc) 1098 { 1099 const struct intel_crtc_state *crtc_state = 1100 intel_atomic_get_new_crtc_state(state, crtc); 1101 u8 update_planes = crtc_state->update_planes; 1102 const struct intel_plane_state __maybe_unused *plane_state; 1103 struct intel_plane *plane; 1104 int i; 1105 1106 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 1107 if (plane->pipe == crtc->pipe && 1108 update_planes & BIT(plane->id)) 1109 plane->disable_flip_done(plane); 1110 } 1111 } 1112 1113 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, 1114 struct intel_crtc *crtc) 1115 { 1116 const struct intel_crtc_state *old_crtc_state = 1117 intel_atomic_get_old_crtc_state(state, crtc); 1118 const struct intel_crtc_state *new_crtc_state = 1119 intel_atomic_get_new_crtc_state(state, crtc); 1120 u8 disable_async_flip_planes = 
old_crtc_state->async_flip_planes & 1121 ~new_crtc_state->async_flip_planes; 1122 const struct intel_plane_state *old_plane_state; 1123 struct intel_plane *plane; 1124 bool need_vbl_wait = false; 1125 int i; 1126 1127 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 1128 if (plane->need_async_flip_toggle_wa && 1129 plane->pipe == crtc->pipe && 1130 disable_async_flip_planes & BIT(plane->id)) { 1131 /* 1132 * Apart from the async flip bit we want to 1133 * preserve the old state for the plane. 1134 */ 1135 intel_plane_async_flip(NULL, plane, 1136 old_crtc_state, old_plane_state, false); 1137 need_vbl_wait = true; 1138 } 1139 } 1140 1141 if (need_vbl_wait) 1142 intel_crtc_wait_for_next_vblank(crtc); 1143 } 1144 1145 static void intel_pre_plane_update(struct intel_atomic_state *state, 1146 struct intel_crtc *crtc) 1147 { 1148 struct intel_display *display = to_intel_display(state); 1149 const struct intel_crtc_state *old_crtc_state = 1150 intel_atomic_get_old_crtc_state(state, crtc); 1151 const struct intel_crtc_state *new_crtc_state = 1152 intel_atomic_get_new_crtc_state(state, crtc); 1153 enum pipe pipe = crtc->pipe; 1154 1155 intel_alpm_pre_plane_update(state, crtc); 1156 intel_psr_pre_plane_update(state, crtc); 1157 1158 if (intel_crtc_vrr_disabling(state, crtc)) { 1159 intel_vrr_disable(old_crtc_state); 1160 intel_vrr_dcb_reset(old_crtc_state, crtc); 1161 intel_crtc_update_active_timings(old_crtc_state, false); 1162 } 1163 1164 if (audio_disabling(old_crtc_state, new_crtc_state)) 1165 intel_encoders_audio_disable(state, crtc); 1166 1167 if (intel_casf_disabling(old_crtc_state, new_crtc_state)) 1168 intel_casf_disable(new_crtc_state); 1169 1170 intel_drrs_deactivate(old_crtc_state); 1171 1172 if (hsw_ips_pre_update(state, crtc)) 1173 intel_crtc_wait_for_next_vblank(crtc); 1174 1175 if (intel_fbc_pre_update(state, crtc)) 1176 intel_crtc_wait_for_next_vblank(crtc); 1177 1178 if (!needs_async_flip_vtd_wa(old_crtc_state) && 1179 
needs_async_flip_vtd_wa(new_crtc_state)) 1180 intel_async_flip_vtd_wa(display, pipe, true); 1181 1182 /* Display WA 827 */ 1183 if (!needs_nv12_wa(old_crtc_state) && 1184 needs_nv12_wa(new_crtc_state)) 1185 skl_wa_827(display, pipe, true); 1186 1187 /* Wa_2006604312:icl,ehl */ 1188 if (!needs_scalerclk_wa(old_crtc_state) && 1189 needs_scalerclk_wa(new_crtc_state)) 1190 icl_wa_scalerclkgating(display, pipe, true); 1191 1192 /* Wa_1604331009:icl,jsl,ehl */ 1193 if (!needs_cursorclk_wa(old_crtc_state) && 1194 needs_cursorclk_wa(new_crtc_state)) 1195 icl_wa_cursorclkgating(display, pipe, true); 1196 1197 /* 1198 * Vblank time updates from the shadow to live plane control register 1199 * are blocked if the memory self-refresh mode is active at that 1200 * moment. So to make sure the plane gets truly disabled, disable 1201 * first the self-refresh mode. The self-refresh enable bit in turn 1202 * will be checked/applied by the HW only at the next frame start 1203 * event which is after the vblank start event, so we need to have a 1204 * wait-for-vblank between disabling the plane and the pipe. 1205 */ 1206 if (HAS_GMCH(display) && old_crtc_state->hw.active && 1207 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(display, false)) 1208 intel_crtc_wait_for_next_vblank(crtc); 1209 1210 /* 1211 * IVB workaround: must disable low power watermarks for at least 1212 * one frame before enabling scaling. LP watermarks can be re-enabled 1213 * when scaling is disabled. 1214 * 1215 * WaCxSRDisabledForSpriteScaling:ivb 1216 */ 1217 if (!HAS_GMCH(display) && old_crtc_state->hw.active && 1218 new_crtc_state->disable_cxsr && ilk_disable_cxsr(display)) 1219 intel_crtc_wait_for_next_vblank(crtc); 1220 1221 /* 1222 * If we're doing a modeset we don't need to do any 1223 * pre-vblank watermark programming here. 
1224 */ 1225 if (!intel_crtc_needs_modeset(new_crtc_state)) { 1226 /* 1227 * For platforms that support atomic watermarks, program the 1228 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these 1229 * will be the intermediate values that are safe for both pre- and 1230 * post- vblank; when vblank happens, the 'active' values will be set 1231 * to the final 'target' values and we'll do this again to get the 1232 * optimal watermarks. For gen9+ platforms, the values we program here 1233 * will be the final target values which will get automatically latched 1234 * at vblank time; no further programming will be necessary. 1235 * 1236 * If a platform hasn't been transitioned to atomic watermarks yet, 1237 * we'll continue to update watermarks the old way, if flags tell 1238 * us to. 1239 */ 1240 if (!intel_initial_watermarks(state, crtc)) 1241 if (new_crtc_state->update_wm_pre) 1242 intel_update_watermarks(display); 1243 } 1244 1245 /* 1246 * Gen2 reports pipe underruns whenever all planes are disabled. 1247 * So disable underrun reporting before all the planes get disabled. 1248 * 1249 * We do this after .initial_watermarks() so that we have a 1250 * chance of catching underruns with the intermediate watermarks 1251 * vs. the old plane configuration. 1252 */ 1253 if (DISPLAY_VER(display) == 2 && planes_disabling(old_crtc_state, new_crtc_state)) 1254 intel_set_cpu_fifo_underrun_reporting(display, pipe, false); 1255 1256 /* 1257 * WA for platforms where async address update enable bit 1258 * is double buffered and only latched at start of vblank. 
1259 */ 1260 if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes) 1261 intel_crtc_async_flip_disable_wa(state, crtc); 1262 } 1263 1264 static void intel_crtc_disable_planes(struct intel_atomic_state *state, 1265 struct intel_crtc *crtc) 1266 { 1267 struct intel_display *display = to_intel_display(state); 1268 const struct intel_crtc_state *new_crtc_state = 1269 intel_atomic_get_new_crtc_state(state, crtc); 1270 unsigned int update_mask = new_crtc_state->update_planes; 1271 const struct intel_plane_state *old_plane_state; 1272 struct intel_plane *plane; 1273 unsigned fb_bits = 0; 1274 int i; 1275 1276 intel_crtc_dpms_overlay_disable(crtc); 1277 1278 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 1279 if (crtc->pipe != plane->pipe || 1280 !(update_mask & BIT(plane->id))) 1281 continue; 1282 1283 intel_plane_disable_arm(NULL, plane, new_crtc_state); 1284 1285 if (old_plane_state->uapi.visible) 1286 fb_bits |= plane->frontbuffer_bit; 1287 } 1288 1289 intel_frontbuffer_flip(display, fb_bits); 1290 } 1291 1292 static void intel_encoders_update_prepare(struct intel_atomic_state *state) 1293 { 1294 struct intel_display *display = to_intel_display(state); 1295 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 1296 struct intel_crtc *crtc; 1297 int i; 1298 1299 /* 1300 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits. 1301 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook. 
1302 */ 1303 if (display->dpll.mgr) { 1304 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 1305 if (intel_crtc_needs_modeset(new_crtc_state)) 1306 continue; 1307 1308 new_crtc_state->intel_dpll = old_crtc_state->intel_dpll; 1309 new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state; 1310 } 1311 } 1312 } 1313 1314 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state, 1315 struct intel_crtc *crtc) 1316 { 1317 const struct intel_crtc_state *crtc_state = 1318 intel_atomic_get_new_crtc_state(state, crtc); 1319 const struct drm_connector_state *conn_state; 1320 struct drm_connector *conn; 1321 int i; 1322 1323 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1324 struct intel_encoder *encoder = 1325 to_intel_encoder(conn_state->best_encoder); 1326 1327 if (conn_state->crtc != &crtc->base) 1328 continue; 1329 1330 if (encoder->pre_pll_enable) 1331 encoder->pre_pll_enable(state, encoder, 1332 crtc_state, conn_state); 1333 } 1334 } 1335 1336 static void intel_encoders_pre_enable(struct intel_atomic_state *state, 1337 struct intel_crtc *crtc) 1338 { 1339 const struct intel_crtc_state *crtc_state = 1340 intel_atomic_get_new_crtc_state(state, crtc); 1341 const struct drm_connector_state *conn_state; 1342 struct drm_connector *conn; 1343 int i; 1344 1345 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1346 struct intel_encoder *encoder = 1347 to_intel_encoder(conn_state->best_encoder); 1348 1349 if (conn_state->crtc != &crtc->base) 1350 continue; 1351 1352 if (encoder->pre_enable) 1353 encoder->pre_enable(state, encoder, 1354 crtc_state, conn_state); 1355 } 1356 } 1357 1358 static void intel_encoders_enable(struct intel_atomic_state *state, 1359 struct intel_crtc *crtc) 1360 { 1361 const struct intel_crtc_state *crtc_state = 1362 intel_atomic_get_new_crtc_state(state, crtc); 1363 const struct drm_connector_state *conn_state; 1364 struct drm_connector *conn; 1365 int i; 
1366 1367 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1368 struct intel_encoder *encoder = 1369 to_intel_encoder(conn_state->best_encoder); 1370 1371 if (conn_state->crtc != &crtc->base) 1372 continue; 1373 1374 if (encoder->enable) 1375 encoder->enable(state, encoder, 1376 crtc_state, conn_state); 1377 intel_opregion_notify_encoder(encoder, true); 1378 } 1379 } 1380 1381 static void intel_encoders_disable(struct intel_atomic_state *state, 1382 struct intel_crtc *crtc) 1383 { 1384 const struct intel_crtc_state *old_crtc_state = 1385 intel_atomic_get_old_crtc_state(state, crtc); 1386 const struct drm_connector_state *old_conn_state; 1387 struct drm_connector *conn; 1388 int i; 1389 1390 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 1391 struct intel_encoder *encoder = 1392 to_intel_encoder(old_conn_state->best_encoder); 1393 1394 if (old_conn_state->crtc != &crtc->base) 1395 continue; 1396 1397 intel_opregion_notify_encoder(encoder, false); 1398 if (encoder->disable) 1399 encoder->disable(state, encoder, 1400 old_crtc_state, old_conn_state); 1401 } 1402 } 1403 1404 static void intel_encoders_post_disable(struct intel_atomic_state *state, 1405 struct intel_crtc *crtc) 1406 { 1407 const struct intel_crtc_state *old_crtc_state = 1408 intel_atomic_get_old_crtc_state(state, crtc); 1409 const struct drm_connector_state *old_conn_state; 1410 struct drm_connector *conn; 1411 int i; 1412 1413 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 1414 struct intel_encoder *encoder = 1415 to_intel_encoder(old_conn_state->best_encoder); 1416 1417 if (old_conn_state->crtc != &crtc->base) 1418 continue; 1419 1420 if (encoder->post_disable) 1421 encoder->post_disable(state, encoder, 1422 old_crtc_state, old_conn_state); 1423 } 1424 } 1425 1426 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state, 1427 struct intel_crtc *crtc) 1428 { 1429 const struct intel_crtc_state *old_crtc_state = 
1430 intel_atomic_get_old_crtc_state(state, crtc); 1431 const struct drm_connector_state *old_conn_state; 1432 struct drm_connector *conn; 1433 int i; 1434 1435 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 1436 struct intel_encoder *encoder = 1437 to_intel_encoder(old_conn_state->best_encoder); 1438 1439 if (old_conn_state->crtc != &crtc->base) 1440 continue; 1441 1442 if (encoder->post_pll_disable) 1443 encoder->post_pll_disable(state, encoder, 1444 old_crtc_state, old_conn_state); 1445 } 1446 } 1447 1448 static void intel_encoders_update_pipe(struct intel_atomic_state *state, 1449 struct intel_crtc *crtc) 1450 { 1451 const struct intel_crtc_state *crtc_state = 1452 intel_atomic_get_new_crtc_state(state, crtc); 1453 const struct drm_connector_state *conn_state; 1454 struct drm_connector *conn; 1455 int i; 1456 1457 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1458 struct intel_encoder *encoder = 1459 to_intel_encoder(conn_state->best_encoder); 1460 1461 if (conn_state->crtc != &crtc->base) 1462 continue; 1463 1464 if (encoder->update_pipe) 1465 encoder->update_pipe(state, encoder, 1466 crtc_state, conn_state); 1467 } 1468 } 1469 1470 static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 1471 { 1472 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1473 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 1474 1475 if (crtc_state->has_pch_encoder) { 1476 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1477 &crtc_state->fdi_m_n); 1478 } else if (intel_crtc_has_dp_encoder(crtc_state)) { 1479 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1480 &crtc_state->dp_m_n); 1481 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 1482 &crtc_state->dp_m2_n2); 1483 } 1484 1485 intel_set_transcoder_timings(crtc_state); 1486 1487 ilk_set_pipeconf(crtc_state); 1488 } 1489 1490 static void ilk_crtc_enable(struct intel_atomic_state *state, 1491 struct intel_crtc *crtc) 
1492 { 1493 struct intel_display *display = to_intel_display(crtc); 1494 const struct intel_crtc_state *new_crtc_state = 1495 intel_atomic_get_new_crtc_state(state, crtc); 1496 enum pipe pipe = crtc->pipe; 1497 1498 if (drm_WARN_ON(display->drm, crtc->active)) 1499 return; 1500 1501 /* 1502 * Sometimes spurious CPU pipe underruns happen during FDI 1503 * training, at least with VGA+HDMI cloning. Suppress them. 1504 * 1505 * On ILK we get an occasional spurious CPU pipe underruns 1506 * between eDP port A enable and vdd enable. Also PCH port 1507 * enable seems to result in the occasional CPU pipe underrun. 1508 * 1509 * Spurious PCH underruns also occur during PCH enabling. 1510 */ 1511 intel_set_cpu_fifo_underrun_reporting(display, pipe, false); 1512 intel_set_pch_fifo_underrun_reporting(display, pipe, false); 1513 1514 ilk_configure_cpu_transcoder(new_crtc_state); 1515 1516 intel_set_pipe_src_size(new_crtc_state); 1517 1518 crtc->active = true; 1519 1520 intel_encoders_pre_enable(state, crtc); 1521 1522 if (new_crtc_state->has_pch_encoder) { 1523 ilk_pch_pre_enable(state, crtc); 1524 } else { 1525 assert_fdi_tx_disabled(display, pipe); 1526 assert_fdi_rx_disabled(display, pipe); 1527 } 1528 1529 ilk_pfit_enable(new_crtc_state); 1530 1531 /* 1532 * On ILK+ LUT must be loaded before the pipe is running but with 1533 * clocks enabled 1534 */ 1535 intel_color_modeset(new_crtc_state); 1536 1537 intel_initial_watermarks(state, crtc); 1538 intel_enable_transcoder(new_crtc_state); 1539 1540 if (new_crtc_state->has_pch_encoder) 1541 ilk_pch_enable(state, crtc); 1542 1543 intel_crtc_vblank_on(new_crtc_state); 1544 1545 intel_encoders_enable(state, crtc); 1546 1547 if (HAS_PCH_CPT(display)) 1548 intel_wait_for_pipe_scanline_moving(crtc); 1549 1550 /* 1551 * Must wait for vblank to avoid spurious PCH FIFO underruns. 1552 * And a second vblank wait is needed at least on ILK with 1553 * some interlaced HDMI modes. 
Let's do the double wait always 1554 * in case there are more corner cases we don't know about. 1555 */ 1556 if (new_crtc_state->has_pch_encoder) { 1557 intel_crtc_wait_for_next_vblank(crtc); 1558 intel_crtc_wait_for_next_vblank(crtc); 1559 } 1560 intel_set_cpu_fifo_underrun_reporting(display, pipe, true); 1561 intel_set_pch_fifo_underrun_reporting(display, pipe, true); 1562 } 1563 1564 /* Display WA #1180: WaDisableScalarClockGating: glk */ 1565 static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state) 1566 { 1567 struct intel_display *display = to_intel_display(crtc_state); 1568 1569 return DISPLAY_VER(display) == 10 && crtc_state->pch_pfit.enabled; 1570 } 1571 1572 static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable) 1573 { 1574 struct intel_display *display = to_intel_display(crtc); 1575 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; 1576 1577 intel_de_rmw(display, CLKGATE_DIS_PSL(crtc->pipe), 1578 mask, enable ? 
mask : 0); 1579 } 1580 1581 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state) 1582 { 1583 struct intel_display *display = to_intel_display(crtc_state); 1584 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1585 1586 intel_de_write(display, WM_LINETIME(crtc->pipe), 1587 HSW_LINETIME(crtc_state->linetime) | 1588 HSW_IPS_LINETIME(crtc_state->ips_linetime)); 1589 } 1590 1591 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) 1592 { 1593 struct intel_display *display = to_intel_display(crtc_state); 1594 1595 intel_de_rmw(display, CHICKEN_TRANS(display, crtc_state->cpu_transcoder), 1596 HSW_FRAME_START_DELAY_MASK, 1597 HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1)); 1598 } 1599 1600 static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 1601 { 1602 struct intel_display *display = to_intel_display(crtc_state); 1603 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1604 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 1605 1606 if (crtc_state->has_pch_encoder) { 1607 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1608 &crtc_state->fdi_m_n); 1609 } else if (intel_crtc_has_dp_encoder(crtc_state)) { 1610 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1611 &crtc_state->dp_m_n); 1612 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 1613 &crtc_state->dp_m2_n2); 1614 } 1615 1616 intel_set_transcoder_timings(crtc_state); 1617 intel_vrr_set_transcoder_timings(crtc_state); 1618 1619 if (cpu_transcoder != TRANSCODER_EDP) 1620 intel_de_write(display, TRANS_MULT(display, cpu_transcoder), 1621 crtc_state->pixel_multiplier - 1); 1622 1623 hsw_set_frame_start_delay(crtc_state); 1624 1625 hsw_set_transconf(crtc_state); 1626 } 1627 1628 static void hsw_crtc_enable(struct intel_atomic_state *state, 1629 struct intel_crtc *crtc) 1630 { 1631 struct intel_display *display = to_intel_display(state); 1632 const struct intel_crtc_state 
*new_crtc_state = 1633 intel_atomic_get_new_crtc_state(state, crtc); 1634 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 1635 struct intel_crtc *pipe_crtc; 1636 int i; 1637 1638 if (drm_WARN_ON(display->drm, crtc->active)) 1639 return; 1640 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { 1641 const struct intel_crtc_state *new_pipe_crtc_state = 1642 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1643 1644 intel_dmc_enable_pipe(new_pipe_crtc_state); 1645 } 1646 1647 intel_encoders_pre_pll_enable(state, crtc); 1648 1649 if (new_crtc_state->intel_dpll) 1650 intel_dpll_enable(new_crtc_state); 1651 1652 intel_encoders_pre_enable(state, crtc); 1653 1654 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { 1655 const struct intel_crtc_state *pipe_crtc_state = 1656 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1657 1658 intel_dsc_enable(pipe_crtc_state); 1659 1660 if (HAS_UNCOMPRESSED_JOINER(display)) 1661 intel_uncompressed_joiner_enable(pipe_crtc_state); 1662 1663 intel_set_pipe_src_size(pipe_crtc_state); 1664 1665 if (DISPLAY_VER(display) >= 9 || display->platform.broadwell) 1666 bdw_set_pipe_misc(NULL, pipe_crtc_state); 1667 } 1668 1669 if (!transcoder_is_dsi(cpu_transcoder)) 1670 hsw_configure_cpu_transcoder(new_crtc_state); 1671 1672 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { 1673 const struct intel_crtc_state *pipe_crtc_state = 1674 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1675 1676 pipe_crtc->active = true; 1677 1678 if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) 1679 glk_pipe_scaler_clock_gating_wa(pipe_crtc, true); 1680 1681 if (DISPLAY_VER(display) >= 9) 1682 skl_pfit_enable(pipe_crtc_state); 1683 else 1684 ilk_pfit_enable(pipe_crtc_state); 1685 1686 /* 1687 * On ILK+ LUT must be loaded before the pipe is running but with 1688 * clocks enabled 1689 */ 1690 intel_color_modeset(pipe_crtc_state); 1691 1692 
hsw_set_linetime_wm(pipe_crtc_state); 1693 1694 if (DISPLAY_VER(display) >= 11) 1695 icl_set_pipe_chicken(pipe_crtc_state); 1696 1697 intel_initial_watermarks(state, pipe_crtc); 1698 } 1699 1700 intel_encoders_enable(state, crtc); 1701 1702 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { 1703 const struct intel_crtc_state *pipe_crtc_state = 1704 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1705 enum pipe hsw_workaround_pipe; 1706 1707 if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) { 1708 intel_crtc_wait_for_next_vblank(pipe_crtc); 1709 glk_pipe_scaler_clock_gating_wa(pipe_crtc, false); 1710 } 1711 1712 /* 1713 * If we change the relative order between pipe/planes 1714 * enabling, we need to change the workaround. 1715 */ 1716 hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe; 1717 if (display->platform.haswell && hsw_workaround_pipe != INVALID_PIPE) { 1718 struct intel_crtc *wa_crtc = 1719 intel_crtc_for_pipe(display, hsw_workaround_pipe); 1720 1721 intel_crtc_wait_for_next_vblank(wa_crtc); 1722 intel_crtc_wait_for_next_vblank(wa_crtc); 1723 } 1724 } 1725 } 1726 1727 static void ilk_crtc_disable(struct intel_atomic_state *state, 1728 struct intel_crtc *crtc) 1729 { 1730 struct intel_display *display = to_intel_display(crtc); 1731 const struct intel_crtc_state *old_crtc_state = 1732 intel_atomic_get_old_crtc_state(state, crtc); 1733 enum pipe pipe = crtc->pipe; 1734 1735 /* 1736 * Sometimes spurious CPU pipe underruns happen when the 1737 * pipe is already disabled, but FDI RX/TX is still enabled. 1738 * Happens at least with VGA+HDMI cloning. Suppress them. 
1739 */ 1740 intel_set_cpu_fifo_underrun_reporting(display, pipe, false); 1741 intel_set_pch_fifo_underrun_reporting(display, pipe, false); 1742 1743 intel_encoders_disable(state, crtc); 1744 1745 intel_crtc_vblank_off(old_crtc_state); 1746 1747 intel_disable_transcoder(old_crtc_state); 1748 1749 ilk_pfit_disable(old_crtc_state); 1750 1751 if (old_crtc_state->has_pch_encoder) 1752 ilk_pch_disable(state, crtc); 1753 1754 intel_encoders_post_disable(state, crtc); 1755 1756 if (old_crtc_state->has_pch_encoder) 1757 ilk_pch_post_disable(state, crtc); 1758 1759 intel_set_cpu_fifo_underrun_reporting(display, pipe, true); 1760 intel_set_pch_fifo_underrun_reporting(display, pipe, true); 1761 } 1762 1763 static void hsw_crtc_disable(struct intel_atomic_state *state, 1764 struct intel_crtc *crtc) 1765 { 1766 struct intel_display *display = to_intel_display(state); 1767 const struct intel_crtc_state *old_crtc_state = 1768 intel_atomic_get_old_crtc_state(state, crtc); 1769 struct intel_crtc *pipe_crtc; 1770 int i; 1771 1772 /* 1773 * FIXME collapse everything to one hook. 1774 * Need care with mst->ddi interactions. 
1775 */ 1776 intel_encoders_disable(state, crtc); 1777 intel_encoders_post_disable(state, crtc); 1778 1779 intel_dpll_disable(old_crtc_state); 1780 1781 intel_encoders_post_pll_disable(state, crtc); 1782 1783 for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { 1784 const struct intel_crtc_state *old_pipe_crtc_state = 1785 intel_atomic_get_old_crtc_state(state, pipe_crtc); 1786 1787 intel_dmc_disable_pipe(old_pipe_crtc_state); 1788 } 1789 } 1790 1791 /* Prefer intel_encoder_is_combo() */ 1792 bool intel_phy_is_combo(struct intel_display *display, enum phy phy) 1793 { 1794 if (phy == PHY_NONE) 1795 return false; 1796 else if (display->platform.alderlake_s) 1797 return phy <= PHY_E; 1798 else if (display->platform.dg1 || display->platform.rocketlake) 1799 return phy <= PHY_D; 1800 else if (display->platform.jasperlake || display->platform.elkhartlake) 1801 return phy <= PHY_C; 1802 else if (display->platform.alderlake_p || IS_DISPLAY_VER(display, 11, 12)) 1803 return phy <= PHY_B; 1804 else 1805 /* 1806 * DG2 outputs labelled as "combo PHY" in the bspec use 1807 * SNPS PHYs with completely different programming, 1808 * hence we always return false here. 1809 */ 1810 return false; 1811 } 1812 1813 /* 1814 * This function returns true if the DDI port respective to the PHY enumeration 1815 * is a Type-C capable port. 1816 * 1817 * Depending on the VBT, the port might be configured 1818 * as a "dedicated external" port, meaning that actual physical PHY is outside 1819 * of the Type-C subsystem and, as such, not really a "Type-C PHY". 1820 * 1821 * Prefer intel_encoder_is_tc(), especially if you really need to know if we 1822 * are dealing with Type-C connections. 
1823 */ 1824 bool intel_phy_is_tc(struct intel_display *display, enum phy phy) 1825 { 1826 /* 1827 * Discrete GPU phy's are not attached to FIA's to support TC 1828 * subsystem Legacy or non-legacy, and only support native DP/HDMI 1829 */ 1830 if (display->platform.dgfx) 1831 return false; 1832 1833 if (DISPLAY_VER(display) >= 13) 1834 return phy >= PHY_F && phy <= PHY_I; 1835 else if (display->platform.tigerlake) 1836 return phy >= PHY_D && phy <= PHY_I; 1837 else if (display->platform.icelake) 1838 return phy >= PHY_C && phy <= PHY_F; 1839 1840 return false; 1841 } 1842 1843 /* Prefer intel_encoder_is_snps() */ 1844 bool intel_phy_is_snps(struct intel_display *display, enum phy phy) 1845 { 1846 /* 1847 * For DG2, and for DG2 only, all four "combo" ports and the TC1 port 1848 * (PHY E) use Synopsis PHYs. See intel_phy_is_tc(). 1849 */ 1850 return display->platform.dg2 && phy > PHY_NONE && phy <= PHY_E; 1851 } 1852 1853 /* Prefer intel_encoder_to_phy() */ 1854 enum phy intel_port_to_phy(struct intel_display *display, enum port port) 1855 { 1856 if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) 1857 return PHY_D + port - PORT_D_XELPD; 1858 else if (DISPLAY_VER(display) >= 13 && port >= PORT_TC1) 1859 return PHY_F + port - PORT_TC1; 1860 else if (display->platform.alderlake_s && port >= PORT_TC1) 1861 return PHY_B + port - PORT_TC1; 1862 else if ((display->platform.dg1 || display->platform.rocketlake) && port >= PORT_TC1) 1863 return PHY_C + port - PORT_TC1; 1864 else if ((display->platform.jasperlake || display->platform.elkhartlake) && 1865 port == PORT_D) 1866 return PHY_A; 1867 1868 return PHY_A + port - PORT_A; 1869 } 1870 1871 /* Prefer intel_encoder_to_tc() */ 1872 /* 1873 * Return TC_PORT_1..I915_MAX_TC_PORTS for any TypeC DDI port. The function 1874 * can be also called for TypeC DDI ports not connected to a TypeC PHY such as 1875 * the PORT_TC1..4 ports on RKL/ADLS/BMG. 
1876 */ 1877 enum tc_port intel_port_to_tc(struct intel_display *display, enum port port) 1878 { 1879 if (DISPLAY_VER(display) >= 12) 1880 return TC_PORT_1 + port - PORT_TC1; 1881 else 1882 return TC_PORT_1 + port - PORT_C; 1883 } 1884 1885 /* 1886 * Return TC_PORT_1..I915_MAX_TC_PORTS for TypeC DDI ports connected to a TypeC PHY. 1887 * Note that on RKL, ADLS, BMG the PORT_TC1..4 ports are connected to a non-TypeC 1888 * PHY, so on those platforms the function returns TC_PORT_NONE. 1889 */ 1890 enum tc_port intel_tc_phy_port_to_tc(struct intel_display *display, enum port port) 1891 { 1892 if (!intel_phy_is_tc(display, intel_port_to_phy(display, port))) 1893 return TC_PORT_NONE; 1894 1895 return intel_port_to_tc(display, port); 1896 } 1897 1898 enum phy intel_encoder_to_phy(struct intel_encoder *encoder) 1899 { 1900 struct intel_display *display = to_intel_display(encoder); 1901 1902 return intel_port_to_phy(display, encoder->port); 1903 } 1904 1905 bool intel_encoder_is_combo(struct intel_encoder *encoder) 1906 { 1907 struct intel_display *display = to_intel_display(encoder); 1908 1909 return intel_phy_is_combo(display, intel_encoder_to_phy(encoder)); 1910 } 1911 1912 bool intel_encoder_is_snps(struct intel_encoder *encoder) 1913 { 1914 struct intel_display *display = to_intel_display(encoder); 1915 1916 return intel_phy_is_snps(display, intel_encoder_to_phy(encoder)); 1917 } 1918 1919 bool intel_encoder_is_tc(struct intel_encoder *encoder) 1920 { 1921 struct intel_display *display = to_intel_display(encoder); 1922 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 1923 1924 if (dig_port && dig_port->dedicated_external) 1925 return false; 1926 1927 return intel_phy_is_tc(display, intel_encoder_to_phy(encoder)); 1928 } 1929 1930 enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder) 1931 { 1932 struct intel_display *display = to_intel_display(encoder); 1933 1934 return intel_tc_phy_port_to_tc(display, encoder->port); 1935 } 1936 1937 enum 
intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);

	/*
	 * A port in TBT-alt mode uses the dedicated TBT AUX power domain
	 * for its AUX channel; otherwise the legacy AUX domain applies.
	 */
	if (intel_tc_port_in_tbt_alt_mode(dig_port))
		return intel_display_power_tbt_aux_domain(display, dig_port->aux_ch);

	return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
}

/*
 * Compute the full set of power domains @crtc_state requires into @mask.
 * An inactive CRTC needs no domains, so the mask is left empty in that case.
 */
static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
				   struct intel_power_domain_mask *mask)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;

	bitmap_zero(mask->bits, POWER_DOMAIN_NUM);

	if (!crtc_state->hw.active)
		return;

	set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
	set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
	/* Panel fitter needs its own domain, also when used for pipe A force-thru */
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);

	/* Each encoder driving this CRTC contributes its own power domain. */
	drm_for_each_encoder_mask(encoder, display->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		set_bit(intel_encoder->power_domain, mask->bits);
	}

	if (HAS_DDI(display) && crtc_state->has_audio)
		set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);

	if (crtc_state->intel_dpll)
		set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);

	if (crtc_state->dsc.compression_enable)
		set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}

/*
 * Acquire references on all power domains newly required by @crtc_state
 * (tracked in crtc->enabled_power_domains), and report in @old_domains the
 * domains that are no longer needed so the caller can drop them later via
 * intel_modeset_put_crtc_power_domains().
 */
void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
					  struct intel_power_domain_mask *old_domains)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum intel_display_power_domain domain;
	struct intel_power_domain_mask domains, new_domains;

	get_crtc_power_domains(crtc_state, &domains);

	/* new = needed but not yet held; old = held but no longer needed */
	bitmap_andnot(new_domains.bits,
		      domains.bits,
		      crtc->enabled_power_domains.mask.bits,
		      POWER_DOMAIN_NUM);
	bitmap_andnot(old_domains->bits,
		      crtc->enabled_power_domains.mask.bits,
		      domains.bits,
		      POWER_DOMAIN_NUM);

	for_each_power_domain(domain, &new_domains)
		intel_display_power_get_in_set(display,
					       &crtc->enabled_power_domains,
					       domain);
}

/* Drop the power-domain references identified by intel_modeset_get_crtc_power_domains(). */
void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
					  struct intel_power_domain_mask *domains)
{
	struct intel_display *display = to_intel_display(crtc);

	intel_display_power_put_mask_in_set(display,
					    &crtc->enabled_power_domains,
					    domains);
}

/* Program M/N values (for DP), transcoder timings and pipeconf for gmch platforms. */
static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	i9xx_set_pipeconf(crtc_state);
}

/*
 * VLV/CHV CRTC enable sequence. NOTE: the ordering of the steps below
 * (transcoder config, PLL enable, encoder hooks, transcoder enable) follows
 * the required hardware enable sequence and must not be reordered.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(display->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	intel_de_write(display, VLV_PIPE_MSA_MISC(display, pipe), 0);

	if (display->platform.cherryview && pipe == PIPE_B) {
		intel_de_write(display, CHV_BLEND(display, pipe),
			       CHV_BLEND_LEGACY);
		intel_de_write(display, CHV_CANVAS(display, pipe), 0);
	}

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(display, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (display->platform.cherryview)
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_modeset(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}

/* Pre-VLV gmch platform CRTC enable sequence; ordering is hardware-mandated. */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(display->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	/* gen2 has no underrun reporting; see also the disable path */
	if (DISPLAY_VER(display) != 2)
		intel_set_cpu_fifo_underrun_reporting(display, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_modeset(new_crtc_state);

	if (!intel_initial_watermarks(state, crtc))
		intel_update_watermarks(display);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(display) == 2)
		intel_crtc_wait_for_next_vblank(crtc);
}

/* Gmch platform CRTC disable sequence; mirror of the enable ordering. */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(display) == 2)
		intel_crtc_wait_for_next_vblank(crtc);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI keeps its PLL handling in the encoder hooks. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (display->platform.cherryview)
			chv_disable_pll(display, pipe);
		else if (display->platform.valleyview)
			vlv_disable_pll(display, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	if (DISPLAY_VER(display) != 2)
		intel_set_cpu_fifo_underrun_reporting(display, pipe, false);

	if (!display->funcs.wm->initial_watermarks)
		intel_update_watermarks(display);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (display->platform.i830)
		i830_enable_pipe(display, pipe);
}

/* drm_encoder .destroy hook: tear down the base encoder and free the wrapper. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return
HAS_DOUBLE_WIDE(display) &&
		(crtc->pipe == PIPE_A || display->platform.i915g);
}

/*
 * Pipe pixel rate for ILK-style pipes: the dotclock, scaled up by the
 * panel fitter downscaling ratio when the PCH pfit is enabled.
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
	struct drm_rect src;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (!crtc_state->pch_pfit.enabled)
		return pixel_rate;

	/* pipe source size in 16.16 fixed point, as intel_adjusted_rate() expects */
	drm_rect_init(&src, 0, 0,
		      drm_rect_width(&crtc_state->pipe_src) << 16,
		      drm_rect_height(&crtc_state->pipe_src) << 16);

	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
				   pixel_rate);
}

/*
 * Fill the normal timing/clock fields of @mode from the crtc_* timing
 * fields of @timings, and regenerate the mode name.
 */
static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
					 const struct drm_display_mode *timings)
{
	mode->hdisplay = timings->crtc_hdisplay;
	mode->htotal = timings->crtc_htotal;
	mode->hsync_start = timings->crtc_hsync_start;
	mode->hsync_end = timings->crtc_hsync_end;

	mode->vdisplay = timings->crtc_vdisplay;
	mode->vtotal = timings->crtc_vtotal;
	mode->vsync_start = timings->crtc_vsync_start;
	mode->vsync_end = timings->crtc_vsync_end;

	mode->flags = timings->flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = timings->crtc_clock;

	drm_mode_set_name(mode);
}

static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (HAS_GMCH(display))
		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
		crtc_state->pixel_rate =
			crtc_state->hw.pipe_mode.crtc_clock;
	else
		crtc_state->pixel_rate =
			ilk_pipe_pixel_rate(crtc_state);
}

/*
 * Scale the horizontal timings down to what a single joined pipe handles.
 * No-op when only one pipe is used.
 */
static void intel_joiner_adjust_timings(const struct intel_crtc_state *crtc_state,
					struct drm_display_mode *mode)
{
	int num_pipes = intel_crtc_num_joined_pipes(crtc_state);

	if (num_pipes == 1)
		return;

	mode->crtc_clock /= num_pipes;
	mode->crtc_hdisplay /= num_pipes;
	mode->crtc_hblank_start /= num_pipes;
	mode->crtc_hblank_end /= num_pipes;
	mode->crtc_hsync_start /= num_pipes;
	mode->crtc_hsync_end /= num_pipes;
	mode->crtc_htotal /= num_pipes;
}

/* Expand eDP MSO per-segment timings into full-link timings. */
static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
					  struct drm_display_mode *mode)
{
	int overlap = crtc_state->splitter.pixel_overlap;
	int n = crtc_state->splitter.link_count;

	if (!crtc_state->splitter.enable)
		return;

	/*
	 * eDP MSO uses segment timings from EDID for transcoder
	 * timings, but full mode for everything else.
	 *
	 * h_full = (h_segment - pixel_overlap) * link_count
	 */
	mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
	mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
	mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
	mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
	mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
	mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
	mode->crtc_clock *= n;
}

/*
 * Derive hw.mode, hw.pipe_mode and the pixel rate from the transcoder
 * timings read out into hw.adjusted_mode. The ordering of the splitter/
 * joiner adjustments below mirrors intel_crtc_compute_pipe_mode().
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/*
	 * We want the full numbers in adjusted_mode normal timings,
	 * adjusted_mode crtc timings are left with the raw transcoder
	 * timings.
	 */
	intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);

	/* Populate the "user" mode with full numbers */
	drm_mode_copy(mode, pipe_mode);
	intel_mode_from_crtc_timings(mode, mode);
	mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
		intel_crtc_num_joined_pipes(crtc_state);
	mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);

	/* Derive per-pipe timings in case joiner is used */
	intel_joiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	intel_crtc_compute_pixel_rate(crtc_state);
}

/* Run the encoder's state readout hook, then derive the dependent CRTC state. */
void intel_encoder_get_config(struct intel_encoder *encoder,
			      struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}

/* Shrink pipe_src to the width a single joined pipe is responsible for. */
static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
	int num_pipes = intel_crtc_num_joined_pipes(crtc_state);
	int width, height;

	if (num_pipes == 1)
		return;

	width = drm_rect_width(&crtc_state->pipe_src);
	height = drm_rect_height(&crtc_state->pipe_src);

	drm_rect_init(&crtc_state->pipe_src, 0, 0,
		      width / num_pipes, height);
}

/* Validate the pipe source size against hardware evenness constraints. */
static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	intel_joiner_compute_pipe_src(crtc_state);

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (drm_rect_width(&crtc_state->pipe_src) & 1) {
		if (crtc_state->double_wide) {
			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
				    crtc->base.base.id, crtc->base.name);
			return -EINVAL;
		}

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(display)) {
			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
				    crtc->base.base.id, crtc->base.name);
			return -EINVAL;
		}
	}

	return 0;
}

/* Derive hw.pipe_mode from hw.adjusted_mode and check the dotclock limit. */
static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	int clock_limit = display->cdclk.max_dotclk_freq;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/* Derive per-pipe timings in case joiner is used */
	intel_joiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(display) < 4) {
		clock_limit = display->cdclk.max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = display->cdclk.max_dotclk_freq;
			crtc_state->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    crtc->base.base.id, crtc->base.name,
			    pipe_mode->crtc_clock, clock_limit,
			    str_yes_no(crtc_state->double_wide));
		return -EINVAL;
	}

	return 0;
}

/*
 * Compute the SET_CONTEXT_LATENCY value for @crtc_state. Only platforms
 * with DSB need a non-zero value; currently only the PSR minimum applies.
 */
static int intel_crtc_set_context_latency(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	int set_context_latency = 0;

	if (!HAS_DSB(display))
		return 0;

	set_context_latency = max(set_context_latency,
				  intel_psr_min_set_context_latency(crtc_state));

	return set_context_latency;
}

/*
 * Validate the SET_CONTEXT_LATENCY value against the vblank length and
 * fold it into the adjusted mode's crtc_vblank_start (delaying the pipe
 * vblank start by that many lines).
 */
static int intel_crtc_compute_set_context_latency(struct intel_atomic_state *state,
						  struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int set_context_latency, max_vblank_delay;

	set_context_latency = intel_crtc_set_context_latency(crtc_state);

	max_vblank_delay = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start - 1;

	if (set_context_latency > max_vblank_delay) {
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] set context latency (%d) exceeds max (%d)\n",
			    crtc->base.base.id, crtc->base.name,
			    set_context_latency,
			    max_vblank_delay);
		return -EINVAL;
	}

	crtc_state->set_context_latency = set_context_latency;
	adjusted_mode->crtc_vblank_start += set_context_latency;

	return 0;
}

/* Top-level CRTC compute step: clocks, SCL, pipe src/mode, pixel rate, FDI/VRR. */
static int intel_crtc_compute_config(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	ret = intel_dpll_crtc_compute_clock(state, crtc);
	if (ret)
		return ret;

	ret = intel_crtc_compute_set_context_latency(state, crtc);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_src(crtc_state);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_mode(crtc_state);
	if (ret)
		return ret;

	intel_crtc_compute_pixel_rate(crtc_state);

	if (crtc_state->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, crtc_state);

	intel_vrr_compute_guardband(crtc_state);

	return 0;
}

/* Shift num/den right together until both fit in the M/N register fields. */
static void
intel_reduce_m_n_ratio(u32 *num, u32 *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK)	{
		*num >>= 1;
		*den >>= 1;
	}
}

/*
 * Compute an M/N pair approximating m/n, with N either fixed at
 * @constant_n or rounded up to a power of two (capped at DATA_LINK_N_MAX).
 */
static void compute_m_n(u32 *ret_m, u32 *ret_n,
			u32 m, u32 n, u32 constant_n)
{
	if (constant_n)
		*ret_n = constant_n;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

void
intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
		       int pixel_clock, int link_clock,
		       int bw_overhead,
		       struct intel_link_m_n *m_n)
{
	u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
	u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
						  bw_overhead);
	u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes);

	/*
	 * Windows/BIOS uses fixed M/N values always. Follow suit.
	 *
	 * Also several DP dongles in particular seem to be fussy
	 * about too large link M/N values. Presumably the 20bit
	 * value used by Windows/BIOS is acceptable to everyone.
	 */
	m_n->tu = 64;
	compute_m_n(&m_n->data_m, &m_n->data_n,
		    data_m, data_n,
		    0x8000000);

	compute_m_n(&m_n->link_m, &m_n->link_n,
		    pixel_clock, link_symbol_clock,
		    0x80000);
}

void intel_panel_sanitize_ssc(struct intel_display *display)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) {
		bool bios_lvds_use_ssc = intel_de_read(display,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (display->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(display->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    str_enabled_disabled(bios_lvds_use_ssc),
				    str_enabled_disabled(display->vbt.lvds_use_ssc));
			display->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}

void intel_zero_m_n(struct intel_link_m_n *m_n)
{
	/* corresponds to 0 register value */
	memset(m_n, 0, sizeof(*m_n));
	m_n->tu = 1;
}

/* Write one full M/N register quadruple; LINK_N must be written last (see below). */
void intel_set_m_n(struct intel_display *display,
		   const struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	intel_de_write(display, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
	intel_de_write(display, data_n_reg, m_n->data_n);
	intel_de_write(display, link_m_reg, m_n->link_m);
	/*
	 * On BDW+ writing LINK_N arms the double buffered update
	 * of all the M/N registers, so it must be written last.
	 */
	intel_de_write(display, link_n_reg, m_n->link_n);
}

/* Does this CPU transcoder have a second (M2/N2) set of M/N registers? */
bool intel_cpu_transcoder_has_m2_n2(struct intel_display *display,
				    enum transcoder transcoder)
{
	if (display->platform.haswell)
		return transcoder == TRANSCODER_EDP;

	return IS_DISPLAY_VER(display, 5, 7) || display->platform.cherryview;
}

/* Program the primary (M1/N1) M/N registers for @transcoder. */
void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;

	/* Pre-ILK (g4x) the registers are per-pipe rather than per-transcoder. */
	if (DISPLAY_VER(display) >= 5)
		intel_set_m_n(display, m_n,
			      PIPE_DATA_M1(display, transcoder),
			      PIPE_DATA_N1(display, transcoder),
			      PIPE_LINK_M1(display, transcoder),
			      PIPE_LINK_N1(display, transcoder));
	else
		intel_set_m_n(display, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}

/* Program the secondary (M2/N2) M/N registers, where the transcoder has them. */
void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct intel_display *display = to_intel_display(crtc);

	if (!intel_cpu_transcoder_has_m2_n2(display, transcoder))
		return;

	intel_set_m_n(display, m_n,
		      PIPE_DATA_M2(display, transcoder),
		      PIPE_DATA_N2(display, transcoder),
		      PIPE_LINK_M2(display, transcoder),
		      PIPE_LINK_N2(display, transcoder));
}

/* VRR is available on VRR-capable hardware for all non-DSI transcoders. */
static bool
transcoder_has_vrr(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	return HAS_VRR(display) && !transcoder_is_dsi(cpu_transcoder);
}

/*
 * Program the full set of CPU transcoder timing registers from
 * hw.adjusted_mode. The interlace/SCL/VRR special cases below depend on the
 * display version; the write order of the registers themselves matters only
 * for the documented HSW eDP VTOTAL workaround at the end.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
	int vsyncshift = 0;

	drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vdisplay = adjusted_mode->crtc_vdisplay;
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_start = adjusted_mode->crtc_vblank_start;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/*
	 * VBLANK_START no longer works on ADL+, instead we must use
	 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
	 */
	if (DISPLAY_VER(display) >= 13) {
		intel_de_write(display,
			       TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
			       crtc_state->set_context_latency);

		/*
		 * VBLANK_START not used by hw, just clear it
		 * to make it stand out in register dumps.
		 */
		crtc_vblank_start = 1;
	} else if (DISPLAY_VER(display) == 12) {
		/* VBLANK_START - VACTIVE defines SCL on TGL */
		crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency;
	}

	if (DISPLAY_VER(display) >= 4 && DISPLAY_VER(display) < 35)
		intel_de_write(display,
			       TRANS_VSYNCSHIFT(display, cpu_transcoder),
			       vsyncshift);

	intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder),
		       HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
		       HTOTAL(adjusted_mode->crtc_htotal - 1));
	intel_de_write(display, TRANS_HBLANK(display, cpu_transcoder),
		       HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
		       HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
	intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder),
		       HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
		       HSYNC_END(adjusted_mode->crtc_hsync_end - 1));

	/*
	 * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
	 * bits are not required. Since the support for these bits is going to
	 * be deprecated in upcoming platforms, avoid writing these bits for the
	 * platforms that do not use legacy Timing Generator.
	 */
	if (intel_vrr_always_use_vrr_tg(display))
		crtc_vtotal = 1;

	intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));
	intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
		       VBLANK_START(crtc_vblank_start - 1) |
		       VBLANK_END(crtc_vblank_end - 1));
	intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder),
		       VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
		       VSYNC_END(adjusted_mode->crtc_vsync_end - 1));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (display->platform.haswell && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(display, TRANS_VTOTAL(display, pipe),
			       VACTIVE(crtc_vdisplay - 1) |
			       VTOTAL(crtc_vtotal - 1));

	if (DISPLAY_VER(display) >= 30) {
		/*
		 * Address issues for resolutions with high refresh rate that
		 * have small Hblank, specifically where Hblank is smaller than
		 * one MTP. Simulations indicate this will address the
		 * jitter issues that currently causes BS to be immediately
		 * followed by BE which DPRX devices are unable to handle.
		 * https://groups.vesa.org/wg/DP/document/20494
		 */
		intel_de_write(display, DP_MIN_HBLANK_CTL(cpu_transcoder),
			       crtc_state->min_hblank);
	}
}

/*
 * Reprogram only the vertical timing registers affected by a low-refresh-rate
 * (LRR) change; the horizontal timings are untouched on this path.
 */
static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;

	drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));

	crtc_vdisplay = adjusted_mode->crtc_vdisplay;
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_start = adjusted_mode->crtc_vblank_start;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;
	}

	if (DISPLAY_VER(display) >= 13) {
		intel_de_write(display,
			       TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
			       crtc_state->set_context_latency);

		/*
		 * VBLANK_START not used by hw, just clear it
		 * to make it stand out in register dumps.
		 */
		crtc_vblank_start = 1;
	} else if (DISPLAY_VER(display) == 12) {
		/* VBLANK_START - VACTIVE defines SCL on TGL */
		crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency;
	}

	/*
	 * The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode.
	 * But let's write it anyway to keep the state checker happy.
	 */
	intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
		       VBLANK_START(crtc_vblank_start - 1) |
		       VBLANK_END(crtc_vblank_end - 1));
	/*
	 * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
	 * bits are not required. Since the support for these bits is going to
	 * be deprecated in upcoming platforms, avoid writing these bits for the
	 * platforms that do not use legacy Timing Generator.
	 */
	if (intel_vrr_always_use_vrr_tg(display))
		crtc_vtotal = 1;

	/*
	 * The double buffer latch point for TRANS_VTOTAL
	 * is the transcoder's undelayed vblank.
	 */
	intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));

	intel_vrr_set_fixed_rr_timings(crtc_state);
	intel_vrr_transcoder_enable(crtc_state);
}

/* Program PIPESRC from the (already computed) pipe source rectangle. */
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int width = drm_rect_width(&crtc_state->pipe_src);
	int height = drm_rect_height(&crtc_state->pipe_src);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	intel_de_write(display, PIPESRC(display, pipe),
		       PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}

/* Read back whether the transcoder is currently in an interlaced mode. */
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* gen2 and ver 35+ have no interlace support */
	if (DISPLAY_VER(display) == 2 || DISPLAY_VER(display) >= 35)
		return false;

	if (DISPLAY_VER(display) >= 9 ||
	    display->platform.broadwell || display->platform.haswell)
		return intel_de_read(display,
				     TRANSCONF(display, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
	else
		return intel_de_read(display,
				     TRANSCONF(display, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
}

/*
 * Read the transcoder timing registers back into hw.adjusted_mode's crtc_*
 * fields (the inverse of intel_set_transcoder_timings()), undoing the
 * hardware's "-1" register encoding and the interlace/SCL adjustments.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(crtc);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	u32 tmp;

	tmp = intel_de_read(display, TRANS_HTOTAL(display, cpu_transcoder));
	adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(display,
				    TRANS_HBLANK(display, cpu_transcoder));
		adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
	}

	tmp = intel_de_read(display, TRANS_HSYNC(display, cpu_transcoder));
	adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;

	tmp = intel_de_read(display, TRANS_VTOTAL(display, cpu_transcoder));
	adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;

	/* FIXME TGL+ DSI transcoders have this! */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(display,
				    TRANS_VBLANK(display, cpu_transcoder));
		adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
	}
	tmp = intel_de_read(display, TRANS_VSYNC(display, cpu_transcoder));
	adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;

	if (intel_pipe_is_interlaced(pipe_config)) {
		adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
		/* undo the "chip adds 2 halflines" adjustment done on the write side */
		adjusted_mode->crtc_vtotal += 1;
		adjusted_mode->crtc_vblank_end += 1;
	}

	if (DISPLAY_VER(display) >= 13 && !transcoder_is_dsi(cpu_transcoder)) {
		pipe_config->set_context_latency =
			intel_de_read(display,
				      TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder));
		adjusted_mode->crtc_vblank_start =
			adjusted_mode->crtc_vdisplay +
			pipe_config->set_context_latency;
	} else if (DISPLAY_VER(display) == 12) {
		/*
		 * TGL doesn't have a dedicated register for SCL.
		 * Instead, the hardware derives SCL from the difference between
		 * TRANS_VBLANK.vblank_start and TRANS_VTOTAL.vactive.
		 * To reflect the HW behaviour, readout the value for SCL as
		 * Vblank start - Vactive.
		 */
		pipe_config->set_context_latency =
			adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
	}

	if (DISPLAY_VER(display) >= 30)
		pipe_config->min_hblank = intel_de_read(display,
							DP_MIN_HBLANK_CTL(cpu_transcoder));
}

/* Translate pipe_src to this pipe's horizontal slice of the joined area. */
static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int num_pipes = intel_crtc_num_joined_pipes(crtc_state);
	enum pipe primary_pipe, pipe = crtc->pipe;
	int width;

	if (num_pipes == 1)
		return;

	primary_pipe = joiner_primary_pipe(crtc_state);
	width = drm_rect_width(&crtc_state->pipe_src);

	drm_rect_translate_to(&crtc_state->pipe_src,
			      (pipe - primary_pipe) * width, 0);
}

/* Read PIPESRC back into pipe_src, accounting for joiner slicing. */
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(crtc);
	u32 tmp;

	tmp = intel_de_read(display, PIPESRC(display, crtc->pipe));

	drm_rect_init(&pipe_config->pipe_src, 0, 0,
		      REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
		      REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);

	intel_joiner_adjust_pipe_src(pipe_config);
}

void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - We keep both pipes enabled on 830
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (display->platform.i830 || !intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	if (crtc_state->double_wide)
		val |= TRANSCONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if
(display->platform.g4x || display->platform.valleyview || 2972 display->platform.cherryview) { 2973 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 2974 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 2975 val |= TRANSCONF_DITHER_EN | 2976 TRANSCONF_DITHER_TYPE_SP; 2977 2978 switch (crtc_state->pipe_bpp) { 2979 default: 2980 /* Case prevented by intel_choose_pipe_bpp_dither. */ 2981 MISSING_CASE(crtc_state->pipe_bpp); 2982 fallthrough; 2983 case 18: 2984 val |= TRANSCONF_BPC_6; 2985 break; 2986 case 24: 2987 val |= TRANSCONF_BPC_8; 2988 break; 2989 case 30: 2990 val |= TRANSCONF_BPC_10; 2991 break; 2992 } 2993 } 2994 2995 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 2996 if (DISPLAY_VER(display) < 4 || 2997 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 2998 val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION; 2999 else 3000 val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT; 3001 } else { 3002 val |= TRANSCONF_INTERLACE_PROGRESSIVE; 3003 } 3004 3005 if ((display->platform.valleyview || display->platform.cherryview) && 3006 crtc_state->limited_color_range) 3007 val |= TRANSCONF_COLOR_RANGE_SELECT; 3008 3009 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 3010 3011 if (crtc_state->wgc_enable) 3012 val |= TRANSCONF_WGC_ENABLE; 3013 3014 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3015 3016 intel_de_write(display, TRANSCONF(display, cpu_transcoder), val); 3017 intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder)); 3018 } 3019 3020 static enum intel_output_format 3021 bdw_get_pipe_misc_output_format(struct intel_crtc *crtc) 3022 { 3023 struct intel_display *display = to_intel_display(crtc); 3024 u32 tmp; 3025 3026 tmp = intel_de_read(display, PIPE_MISC(crtc->pipe)); 3027 3028 if (tmp & PIPE_MISC_YUV420_ENABLE) { 3029 /* 3030 * We support 4:2:0 in full blend mode only. 3031 * For xe3_lpd+ this is implied in YUV420 Enable bit. 3032 * Ensure the same for prior platforms in YUV420 Mode bit. 
3033 */ 3034 if (DISPLAY_VER(display) < 30) 3035 drm_WARN_ON(display->drm, 3036 (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0); 3037 3038 return INTEL_OUTPUT_FORMAT_YCBCR420; 3039 } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) { 3040 return INTEL_OUTPUT_FORMAT_YCBCR444; 3041 } else { 3042 return INTEL_OUTPUT_FORMAT_RGB; 3043 } 3044 } 3045 3046 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 3047 struct intel_crtc_state *pipe_config) 3048 { 3049 struct intel_display *display = to_intel_display(crtc); 3050 enum intel_display_power_domain power_domain; 3051 enum transcoder cpu_transcoder = (enum transcoder)crtc->pipe; 3052 struct ref_tracker *wakeref; 3053 bool ret = false; 3054 u32 tmp; 3055 3056 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 3057 wakeref = intel_display_power_get_if_enabled(display, power_domain); 3058 if (!wakeref) 3059 return false; 3060 3061 tmp = intel_de_read(display, TRANSCONF(display, cpu_transcoder)); 3062 if (!(tmp & TRANSCONF_ENABLE)) 3063 goto out; 3064 3065 pipe_config->cpu_transcoder = cpu_transcoder; 3066 3067 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3068 pipe_config->sink_format = pipe_config->output_format; 3069 3070 if (display->platform.g4x || display->platform.valleyview || 3071 display->platform.cherryview) { 3072 switch (tmp & TRANSCONF_BPC_MASK) { 3073 case TRANSCONF_BPC_6: 3074 pipe_config->pipe_bpp = 18; 3075 break; 3076 case TRANSCONF_BPC_8: 3077 pipe_config->pipe_bpp = 24; 3078 break; 3079 case TRANSCONF_BPC_10: 3080 pipe_config->pipe_bpp = 30; 3081 break; 3082 default: 3083 MISSING_CASE(tmp); 3084 break; 3085 } 3086 } 3087 3088 if ((display->platform.valleyview || display->platform.cherryview) && 3089 (tmp & TRANSCONF_COLOR_RANGE_SELECT)) 3090 pipe_config->limited_color_range = true; 3091 3092 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp); 3093 3094 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 3095 3096 if 
((display->platform.valleyview || display->platform.cherryview) && 3097 (tmp & TRANSCONF_WGC_ENABLE)) 3098 pipe_config->wgc_enable = true; 3099 3100 intel_color_get_config(pipe_config); 3101 3102 if (HAS_DOUBLE_WIDE(display)) 3103 pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE; 3104 3105 intel_get_transcoder_timings(crtc, pipe_config); 3106 intel_get_pipe_src_size(crtc, pipe_config); 3107 3108 i9xx_pfit_get_config(pipe_config); 3109 3110 i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state); 3111 3112 if (DISPLAY_VER(display) >= 4) { 3113 tmp = pipe_config->dpll_hw_state.i9xx.dpll_md; 3114 pipe_config->pixel_multiplier = 3115 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 3116 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 3117 } else if (display->platform.i945g || display->platform.i945gm || 3118 display->platform.g33 || display->platform.pineview) { 3119 tmp = pipe_config->dpll_hw_state.i9xx.dpll; 3120 pipe_config->pixel_multiplier = 3121 ((tmp & SDVO_MULTIPLIER_MASK) 3122 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 3123 } else { 3124 /* Note that on i915G/GM the pixel multiplier is in the sdvo 3125 * port and will be fixed up in the encoder->get_config 3126 * function. */ 3127 pipe_config->pixel_multiplier = 1; 3128 } 3129 3130 if (display->platform.cherryview) 3131 chv_crtc_clock_get(pipe_config); 3132 else if (display->platform.valleyview) 3133 vlv_crtc_clock_get(pipe_config); 3134 else 3135 i9xx_crtc_clock_get(pipe_config); 3136 3137 /* 3138 * Normally the dotclock is filled in by the encoder .get_config() 3139 * but in case the pipe is enabled w/o any ports we need a sane 3140 * default. 
3141 */ 3142 pipe_config->hw.adjusted_mode.crtc_clock = 3143 pipe_config->port_clock / pipe_config->pixel_multiplier; 3144 3145 ret = true; 3146 3147 out: 3148 intel_display_power_put(display, power_domain, wakeref); 3149 3150 return ret; 3151 } 3152 3153 void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) 3154 { 3155 struct intel_display *display = to_intel_display(crtc_state); 3156 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3157 u32 val = 0; 3158 3159 /* 3160 * - During modeset the pipe is still disabled and must remain so 3161 * - During fastset the pipe is already enabled and must remain so 3162 */ 3163 if (!intel_crtc_needs_modeset(crtc_state)) 3164 val |= TRANSCONF_ENABLE; 3165 3166 switch (crtc_state->pipe_bpp) { 3167 default: 3168 /* Case prevented by intel_choose_pipe_bpp_dither. */ 3169 MISSING_CASE(crtc_state->pipe_bpp); 3170 fallthrough; 3171 case 18: 3172 val |= TRANSCONF_BPC_6; 3173 break; 3174 case 24: 3175 val |= TRANSCONF_BPC_8; 3176 break; 3177 case 30: 3178 val |= TRANSCONF_BPC_10; 3179 break; 3180 case 36: 3181 val |= TRANSCONF_BPC_12; 3182 break; 3183 } 3184 3185 if (crtc_state->dither) 3186 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3187 3188 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3189 val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3190 else 3191 val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3192 3193 /* 3194 * This would end up with an odd purple hue over 3195 * the entire display. Make sure we don't do it. 
3196 */ 3197 drm_WARN_ON(display->drm, crtc_state->limited_color_range && 3198 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 3199 3200 if (crtc_state->limited_color_range && 3201 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3202 val |= TRANSCONF_COLOR_RANGE_SELECT; 3203 3204 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3205 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709; 3206 3207 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 3208 3209 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3210 val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); 3211 3212 intel_de_write(display, TRANSCONF(display, cpu_transcoder), val); 3213 intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder)); 3214 } 3215 3216 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) 3217 { 3218 struct intel_display *display = to_intel_display(crtc_state); 3219 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3220 u32 val = 0; 3221 3222 /* 3223 * - During modeset the pipe is still disabled and must remain so 3224 * - During fastset the pipe is already enabled and must remain so 3225 */ 3226 if (!intel_crtc_needs_modeset(crtc_state)) 3227 val |= TRANSCONF_ENABLE; 3228 3229 if (display->platform.haswell && crtc_state->dither) 3230 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3231 3232 if (DISPLAY_VER(display) < 35) { 3233 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3234 val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3235 else 3236 val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3237 } 3238 3239 if (display->platform.haswell && 3240 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3241 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW; 3242 3243 intel_de_write(display, TRANSCONF(display, cpu_transcoder), val); 3244 intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder)); 3245 } 3246 3247 static void bdw_set_pipe_misc(struct intel_dsb *dsb, 3248 const struct intel_crtc_state 
*crtc_state) 3249 { 3250 struct intel_display *display = to_intel_display(crtc_state); 3251 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3252 u32 val = 0; 3253 3254 switch (crtc_state->pipe_bpp) { 3255 case 18: 3256 val |= PIPE_MISC_BPC_6; 3257 break; 3258 case 24: 3259 val |= PIPE_MISC_BPC_8; 3260 break; 3261 case 30: 3262 val |= PIPE_MISC_BPC_10; 3263 break; 3264 case 36: 3265 /* Port output 12BPC defined for ADLP+ */ 3266 if (DISPLAY_VER(display) >= 13) 3267 val |= PIPE_MISC_BPC_12_ADLP; 3268 break; 3269 default: 3270 MISSING_CASE(crtc_state->pipe_bpp); 3271 break; 3272 } 3273 3274 if (crtc_state->dither) 3275 val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP; 3276 3277 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 3278 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 3279 val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV; 3280 3281 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 3282 val |= DISPLAY_VER(display) >= 30 ? PIPE_MISC_YUV420_ENABLE : 3283 PIPE_MISC_YUV420_ENABLE | PIPE_MISC_YUV420_MODE_FULL_BLEND; 3284 3285 if (DISPLAY_VER(display) >= 11 && is_hdr_mode(crtc_state)) 3286 val |= PIPE_MISC_HDR_MODE_PRECISION; 3287 3288 if (DISPLAY_VER(display) >= 12) 3289 val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC; 3290 3291 /* allow PSR with sprite enabled */ 3292 if (display->platform.broadwell) 3293 val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE; 3294 3295 intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val); 3296 } 3297 3298 int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) 3299 { 3300 struct intel_display *display = to_intel_display(crtc); 3301 u32 tmp; 3302 3303 tmp = intel_de_read(display, PIPE_MISC(crtc->pipe)); 3304 3305 switch (tmp & PIPE_MISC_BPC_MASK) { 3306 case PIPE_MISC_BPC_6: 3307 return 18; 3308 case PIPE_MISC_BPC_8: 3309 return 24; 3310 case PIPE_MISC_BPC_10: 3311 return 30; 3312 /* 3313 * PORT OUTPUT 12 BPC defined for ADLP+. 
3314 * 3315 * TODO: 3316 * For previous platforms with DSI interface, bits 5:7 3317 * are used for storing pipe_bpp irrespective of dithering. 3318 * Since the value of 12 BPC is not defined for these bits 3319 * on older platforms, need to find a workaround for 12 BPC 3320 * MIPI DSI HW readout. 3321 */ 3322 case PIPE_MISC_BPC_12_ADLP: 3323 if (DISPLAY_VER(display) >= 13) 3324 return 36; 3325 fallthrough; 3326 default: 3327 MISSING_CASE(tmp); 3328 return 0; 3329 } 3330 } 3331 3332 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) 3333 { 3334 /* 3335 * Account for spread spectrum to avoid 3336 * oversubscribing the link. Max center spread 3337 * is 2.5%; use 5% for safety's sake. 3338 */ 3339 u32 bps = target_clock * bpp * 21 / 20; 3340 return DIV_ROUND_UP(bps, link_bw * 8); 3341 } 3342 3343 void intel_get_m_n(struct intel_display *display, 3344 struct intel_link_m_n *m_n, 3345 i915_reg_t data_m_reg, i915_reg_t data_n_reg, 3346 i915_reg_t link_m_reg, i915_reg_t link_n_reg) 3347 { 3348 m_n->link_m = intel_de_read(display, link_m_reg) & DATA_LINK_M_N_MASK; 3349 m_n->link_n = intel_de_read(display, link_n_reg) & DATA_LINK_M_N_MASK; 3350 m_n->data_m = intel_de_read(display, data_m_reg) & DATA_LINK_M_N_MASK; 3351 m_n->data_n = intel_de_read(display, data_n_reg) & DATA_LINK_M_N_MASK; 3352 m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(display, data_m_reg)) + 1; 3353 } 3354 3355 void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, 3356 enum transcoder transcoder, 3357 struct intel_link_m_n *m_n) 3358 { 3359 struct intel_display *display = to_intel_display(crtc); 3360 enum pipe pipe = crtc->pipe; 3361 3362 if (DISPLAY_VER(display) >= 5) 3363 intel_get_m_n(display, m_n, 3364 PIPE_DATA_M1(display, transcoder), 3365 PIPE_DATA_N1(display, transcoder), 3366 PIPE_LINK_M1(display, transcoder), 3367 PIPE_LINK_N1(display, transcoder)); 3368 else 3369 intel_get_m_n(display, m_n, 3370 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), 3371 
PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); 3372 } 3373 3374 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, 3375 enum transcoder transcoder, 3376 struct intel_link_m_n *m_n) 3377 { 3378 struct intel_display *display = to_intel_display(crtc); 3379 3380 if (!intel_cpu_transcoder_has_m2_n2(display, transcoder)) 3381 return; 3382 3383 intel_get_m_n(display, m_n, 3384 PIPE_DATA_M2(display, transcoder), 3385 PIPE_DATA_N2(display, transcoder), 3386 PIPE_LINK_M2(display, transcoder), 3387 PIPE_LINK_N2(display, transcoder)); 3388 } 3389 3390 static bool ilk_get_pipe_config(struct intel_crtc *crtc, 3391 struct intel_crtc_state *pipe_config) 3392 { 3393 struct intel_display *display = to_intel_display(crtc); 3394 enum intel_display_power_domain power_domain; 3395 enum transcoder cpu_transcoder = (enum transcoder)crtc->pipe; 3396 struct ref_tracker *wakeref; 3397 bool ret = false; 3398 u32 tmp; 3399 3400 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 3401 wakeref = intel_display_power_get_if_enabled(display, power_domain); 3402 if (!wakeref) 3403 return false; 3404 3405 tmp = intel_de_read(display, TRANSCONF(display, cpu_transcoder)); 3406 if (!(tmp & TRANSCONF_ENABLE)) 3407 goto out; 3408 3409 pipe_config->cpu_transcoder = cpu_transcoder; 3410 3411 switch (tmp & TRANSCONF_BPC_MASK) { 3412 case TRANSCONF_BPC_6: 3413 pipe_config->pipe_bpp = 18; 3414 break; 3415 case TRANSCONF_BPC_8: 3416 pipe_config->pipe_bpp = 24; 3417 break; 3418 case TRANSCONF_BPC_10: 3419 pipe_config->pipe_bpp = 30; 3420 break; 3421 case TRANSCONF_BPC_12: 3422 pipe_config->pipe_bpp = 36; 3423 break; 3424 default: 3425 break; 3426 } 3427 3428 if (tmp & TRANSCONF_COLOR_RANGE_SELECT) 3429 pipe_config->limited_color_range = true; 3430 3431 switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) { 3432 case TRANSCONF_OUTPUT_COLORSPACE_YUV601: 3433 case TRANSCONF_OUTPUT_COLORSPACE_YUV709: 3434 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 3435 break; 3436 default: 3437 
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3438 break; 3439 } 3440 3441 pipe_config->sink_format = pipe_config->output_format; 3442 3443 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp); 3444 3445 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 3446 3447 pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp); 3448 3449 intel_color_get_config(pipe_config); 3450 3451 pipe_config->pixel_multiplier = 1; 3452 3453 ilk_pch_get_config(pipe_config); 3454 3455 intel_get_transcoder_timings(crtc, pipe_config); 3456 intel_get_pipe_src_size(crtc, pipe_config); 3457 3458 ilk_pfit_get_config(pipe_config); 3459 3460 ret = true; 3461 3462 out: 3463 intel_display_power_put(display, power_domain, wakeref); 3464 3465 return ret; 3466 } 3467 3468 static u8 joiner_pipes(struct intel_display *display) 3469 { 3470 u8 pipes; 3471 3472 if (DISPLAY_VER(display) >= 12) 3473 pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); 3474 else if (DISPLAY_VER(display) >= 11) 3475 pipes = BIT(PIPE_B) | BIT(PIPE_C); 3476 else 3477 pipes = 0; 3478 3479 return pipes & DISPLAY_RUNTIME_INFO(display)->pipe_mask; 3480 } 3481 3482 static bool transcoder_ddi_func_is_enabled(struct intel_display *display, 3483 enum transcoder cpu_transcoder) 3484 { 3485 enum intel_display_power_domain power_domain; 3486 u32 tmp = 0; 3487 3488 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3489 3490 with_intel_display_power_if_enabled(display, power_domain) 3491 tmp = intel_de_read(display, 3492 TRANS_DDI_FUNC_CTL(display, cpu_transcoder)); 3493 3494 return tmp & TRANS_DDI_FUNC_ENABLE; 3495 } 3496 3497 static void enabled_uncompressed_joiner_pipes(struct intel_display *display, 3498 u8 *primary_pipes, u8 *secondary_pipes) 3499 { 3500 struct intel_crtc *crtc; 3501 3502 *primary_pipes = 0; 3503 *secondary_pipes = 0; 3504 3505 if (!HAS_UNCOMPRESSED_JOINER(display)) 3506 return; 3507 3508 
for_each_intel_crtc_in_pipe_mask(display->drm, crtc, 3509 joiner_pipes(display)) { 3510 enum intel_display_power_domain power_domain; 3511 enum pipe pipe = crtc->pipe; 3512 3513 power_domain = POWER_DOMAIN_PIPE(pipe); 3514 with_intel_display_power_if_enabled(display, power_domain) { 3515 u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe)); 3516 3517 if (tmp & UNCOMPRESSED_JOINER_PRIMARY) 3518 *primary_pipes |= BIT(pipe); 3519 if (tmp & UNCOMPRESSED_JOINER_SECONDARY) 3520 *secondary_pipes |= BIT(pipe); 3521 } 3522 } 3523 } 3524 3525 static void enabled_bigjoiner_pipes(struct intel_display *display, 3526 u8 *primary_pipes, u8 *secondary_pipes) 3527 { 3528 struct intel_crtc *crtc; 3529 3530 *primary_pipes = 0; 3531 *secondary_pipes = 0; 3532 3533 if (!HAS_BIGJOINER(display)) 3534 return; 3535 3536 for_each_intel_crtc_in_pipe_mask(display->drm, crtc, 3537 joiner_pipes(display)) { 3538 enum intel_display_power_domain power_domain; 3539 enum pipe pipe = crtc->pipe; 3540 3541 power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe); 3542 with_intel_display_power_if_enabled(display, power_domain) { 3543 u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe)); 3544 3545 if (!(tmp & BIG_JOINER_ENABLE)) 3546 continue; 3547 3548 if (tmp & PRIMARY_BIG_JOINER_ENABLE) 3549 *primary_pipes |= BIT(pipe); 3550 else 3551 *secondary_pipes |= BIT(pipe); 3552 } 3553 } 3554 } 3555 3556 static u8 expected_secondary_pipes(u8 primary_pipes, int num_pipes) 3557 { 3558 u8 secondary_pipes = 0; 3559 3560 for (int i = 1; i < num_pipes; i++) 3561 secondary_pipes |= primary_pipes << i; 3562 3563 return secondary_pipes; 3564 } 3565 3566 static u8 expected_uncompressed_joiner_secondary_pipes(u8 uncompjoiner_primary_pipes) 3567 { 3568 return expected_secondary_pipes(uncompjoiner_primary_pipes, 2); 3569 } 3570 3571 static u8 expected_bigjoiner_secondary_pipes(u8 bigjoiner_primary_pipes) 3572 { 3573 return expected_secondary_pipes(bigjoiner_primary_pipes, 2); 3574 } 3575 3576 static u8 
get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes) 3577 { 3578 primary_pipes &= GENMASK(pipe, 0); 3579 3580 return primary_pipes ? BIT(fls(primary_pipes) - 1) : 0; 3581 } 3582 3583 static u8 expected_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes) 3584 { 3585 return expected_secondary_pipes(ultrajoiner_primary_pipes, 4); 3586 } 3587 3588 static u8 fixup_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes, 3589 u8 ultrajoiner_secondary_pipes) 3590 { 3591 return ultrajoiner_secondary_pipes | ultrajoiner_primary_pipes << 3; 3592 } 3593 3594 static void enabled_ultrajoiner_pipes(struct intel_display *display, 3595 u8 *primary_pipes, u8 *secondary_pipes) 3596 { 3597 struct intel_crtc *crtc; 3598 3599 *primary_pipes = 0; 3600 *secondary_pipes = 0; 3601 3602 if (!HAS_ULTRAJOINER(display)) 3603 return; 3604 3605 for_each_intel_crtc_in_pipe_mask(display->drm, crtc, 3606 joiner_pipes(display)) { 3607 enum intel_display_power_domain power_domain; 3608 enum pipe pipe = crtc->pipe; 3609 3610 power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe); 3611 with_intel_display_power_if_enabled(display, power_domain) { 3612 u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe)); 3613 3614 if (!(tmp & ULTRA_JOINER_ENABLE)) 3615 continue; 3616 3617 if (tmp & PRIMARY_ULTRA_JOINER_ENABLE) 3618 *primary_pipes |= BIT(pipe); 3619 else 3620 *secondary_pipes |= BIT(pipe); 3621 } 3622 } 3623 } 3624 3625 static void enabled_joiner_pipes(struct intel_display *display, 3626 enum pipe pipe, 3627 u8 *primary_pipe, u8 *secondary_pipes) 3628 { 3629 u8 primary_ultrajoiner_pipes; 3630 u8 primary_uncompressed_joiner_pipes, primary_bigjoiner_pipes; 3631 u8 secondary_ultrajoiner_pipes; 3632 u8 secondary_uncompressed_joiner_pipes, secondary_bigjoiner_pipes; 3633 u8 ultrajoiner_pipes; 3634 u8 uncompressed_joiner_pipes, bigjoiner_pipes; 3635 3636 enabled_ultrajoiner_pipes(display, &primary_ultrajoiner_pipes, 3637 &secondary_ultrajoiner_pipes); 3638 /* 3639 * For some 
strange reason the last pipe in the set of four 3640 * shouldn't have ultrajoiner enable bit set in hardware. 3641 * Set the bit anyway to make life easier. 3642 */ 3643 drm_WARN_ON(display->drm, 3644 expected_secondary_pipes(primary_ultrajoiner_pipes, 3) != 3645 secondary_ultrajoiner_pipes); 3646 secondary_ultrajoiner_pipes = 3647 fixup_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes, 3648 secondary_ultrajoiner_pipes); 3649 3650 drm_WARN_ON(display->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0); 3651 3652 enabled_uncompressed_joiner_pipes(display, &primary_uncompressed_joiner_pipes, 3653 &secondary_uncompressed_joiner_pipes); 3654 3655 drm_WARN_ON(display->drm, 3656 (primary_uncompressed_joiner_pipes & secondary_uncompressed_joiner_pipes) != 0); 3657 3658 enabled_bigjoiner_pipes(display, &primary_bigjoiner_pipes, 3659 &secondary_bigjoiner_pipes); 3660 3661 drm_WARN_ON(display->drm, 3662 (primary_bigjoiner_pipes & secondary_bigjoiner_pipes) != 0); 3663 3664 ultrajoiner_pipes = primary_ultrajoiner_pipes | secondary_ultrajoiner_pipes; 3665 uncompressed_joiner_pipes = primary_uncompressed_joiner_pipes | 3666 secondary_uncompressed_joiner_pipes; 3667 bigjoiner_pipes = primary_bigjoiner_pipes | secondary_bigjoiner_pipes; 3668 3669 drm_WARN(display->drm, (ultrajoiner_pipes & bigjoiner_pipes) != ultrajoiner_pipes, 3670 "Ultrajoiner pipes(%#x) should be bigjoiner pipes(%#x)\n", 3671 ultrajoiner_pipes, bigjoiner_pipes); 3672 3673 drm_WARN(display->drm, secondary_ultrajoiner_pipes != 3674 expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes), 3675 "Wrong secondary ultrajoiner pipes(expected %#x, current %#x)\n", 3676 expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes), 3677 secondary_ultrajoiner_pipes); 3678 3679 drm_WARN(display->drm, (uncompressed_joiner_pipes & bigjoiner_pipes) != 0, 3680 "Uncompressed joiner pipes(%#x) and bigjoiner pipes(%#x) can't intersect\n", 3681 uncompressed_joiner_pipes, bigjoiner_pipes); 3682 
3683 drm_WARN(display->drm, secondary_bigjoiner_pipes != 3684 expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes), 3685 "Wrong secondary bigjoiner pipes(expected %#x, current %#x)\n", 3686 expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes), 3687 secondary_bigjoiner_pipes); 3688 3689 drm_WARN(display->drm, secondary_uncompressed_joiner_pipes != 3690 expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes), 3691 "Wrong secondary uncompressed joiner pipes(expected %#x, current %#x)\n", 3692 expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes), 3693 secondary_uncompressed_joiner_pipes); 3694 3695 *primary_pipe = 0; 3696 *secondary_pipes = 0; 3697 3698 if (ultrajoiner_pipes & BIT(pipe)) { 3699 *primary_pipe = get_joiner_primary_pipe(pipe, primary_ultrajoiner_pipes); 3700 *secondary_pipes = secondary_ultrajoiner_pipes & 3701 expected_ultrajoiner_secondary_pipes(*primary_pipe); 3702 3703 drm_WARN(display->drm, 3704 expected_ultrajoiner_secondary_pipes(*primary_pipe) != 3705 *secondary_pipes, 3706 "Wrong ultrajoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", 3707 *primary_pipe, 3708 expected_ultrajoiner_secondary_pipes(*primary_pipe), 3709 *secondary_pipes); 3710 return; 3711 } 3712 3713 if (uncompressed_joiner_pipes & BIT(pipe)) { 3714 *primary_pipe = get_joiner_primary_pipe(pipe, primary_uncompressed_joiner_pipes); 3715 *secondary_pipes = secondary_uncompressed_joiner_pipes & 3716 expected_uncompressed_joiner_secondary_pipes(*primary_pipe); 3717 3718 drm_WARN(display->drm, 3719 expected_uncompressed_joiner_secondary_pipes(*primary_pipe) != 3720 *secondary_pipes, 3721 "Wrong uncompressed joiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", 3722 *primary_pipe, 3723 expected_uncompressed_joiner_secondary_pipes(*primary_pipe), 3724 *secondary_pipes); 3725 return; 3726 } 3727 3728 if (bigjoiner_pipes & BIT(pipe)) { 3729 *primary_pipe = 
get_joiner_primary_pipe(pipe, primary_bigjoiner_pipes); 3730 *secondary_pipes = secondary_bigjoiner_pipes & 3731 expected_bigjoiner_secondary_pipes(*primary_pipe); 3732 3733 drm_WARN(display->drm, 3734 expected_bigjoiner_secondary_pipes(*primary_pipe) != 3735 *secondary_pipes, 3736 "Wrong bigjoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", 3737 *primary_pipe, 3738 expected_bigjoiner_secondary_pipes(*primary_pipe), 3739 *secondary_pipes); 3740 return; 3741 } 3742 } 3743 3744 static u8 hsw_panel_transcoders(struct intel_display *display) 3745 { 3746 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP); 3747 3748 if (DISPLAY_VER(display) >= 11) 3749 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 3750 3751 return panel_transcoder_mask; 3752 } 3753 3754 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) 3755 { 3756 struct intel_display *display = to_intel_display(crtc); 3757 u8 panel_transcoder_mask = hsw_panel_transcoders(display); 3758 enum transcoder cpu_transcoder; 3759 u8 primary_pipe, secondary_pipes; 3760 u8 enabled_transcoders = 0; 3761 3762 /* 3763 * XXX: Do intel_display_power_get_if_enabled before reading this (for 3764 * consistency and less surprising code; it's in always on power). 
3765 */ 3766 for_each_cpu_transcoder_masked(display, cpu_transcoder, 3767 panel_transcoder_mask) { 3768 enum intel_display_power_domain power_domain; 3769 enum pipe trans_pipe; 3770 u32 tmp = 0; 3771 3772 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3773 with_intel_display_power_if_enabled(display, power_domain) 3774 tmp = intel_de_read(display, 3775 TRANS_DDI_FUNC_CTL(display, cpu_transcoder)); 3776 3777 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 3778 continue; 3779 3780 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 3781 default: 3782 drm_WARN(display->drm, 1, 3783 "unknown pipe linked to transcoder %s\n", 3784 transcoder_name(cpu_transcoder)); 3785 fallthrough; 3786 case TRANS_DDI_EDP_INPUT_A_ONOFF: 3787 case TRANS_DDI_EDP_INPUT_A_ON: 3788 trans_pipe = PIPE_A; 3789 break; 3790 case TRANS_DDI_EDP_INPUT_B_ONOFF: 3791 trans_pipe = PIPE_B; 3792 break; 3793 case TRANS_DDI_EDP_INPUT_C_ONOFF: 3794 trans_pipe = PIPE_C; 3795 break; 3796 case TRANS_DDI_EDP_INPUT_D_ONOFF: 3797 trans_pipe = PIPE_D; 3798 break; 3799 } 3800 3801 if (trans_pipe == crtc->pipe) 3802 enabled_transcoders |= BIT(cpu_transcoder); 3803 } 3804 3805 /* single pipe or joiner primary */ 3806 cpu_transcoder = (enum transcoder) crtc->pipe; 3807 if (transcoder_ddi_func_is_enabled(display, cpu_transcoder)) 3808 enabled_transcoders |= BIT(cpu_transcoder); 3809 3810 /* joiner secondary -> consider the primary pipe's transcoder as well */ 3811 enabled_joiner_pipes(display, crtc->pipe, &primary_pipe, &secondary_pipes); 3812 if (secondary_pipes & BIT(crtc->pipe)) { 3813 cpu_transcoder = (enum transcoder)ffs(primary_pipe) - 1; 3814 if (transcoder_ddi_func_is_enabled(display, cpu_transcoder)) 3815 enabled_transcoders |= BIT(cpu_transcoder); 3816 } 3817 3818 return enabled_transcoders; 3819 } 3820 3821 static bool has_edp_transcoders(u8 enabled_transcoders) 3822 { 3823 return enabled_transcoders & BIT(TRANSCODER_EDP); 3824 } 3825 3826 static bool has_dsi_transcoders(u8 enabled_transcoders) 3827 { 3828 return 
enabled_transcoders & (BIT(TRANSCODER_DSI_0) | 3829 BIT(TRANSCODER_DSI_1)); 3830 } 3831 3832 static bool has_pipe_transcoders(u8 enabled_transcoders) 3833 { 3834 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) | 3835 BIT(TRANSCODER_DSI_0) | 3836 BIT(TRANSCODER_DSI_1)); 3837 } 3838 3839 static void assert_enabled_transcoders(struct intel_display *display, 3840 u8 enabled_transcoders) 3841 { 3842 /* Only one type of transcoder please */ 3843 drm_WARN_ON(display->drm, 3844 has_edp_transcoders(enabled_transcoders) + 3845 has_dsi_transcoders(enabled_transcoders) + 3846 has_pipe_transcoders(enabled_transcoders) > 1); 3847 3848 /* Only DSI transcoders can be ganged */ 3849 drm_WARN_ON(display->drm, 3850 !has_dsi_transcoders(enabled_transcoders) && 3851 !is_power_of_2(enabled_transcoders)); 3852 } 3853 3854 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 3855 struct intel_crtc_state *pipe_config, 3856 struct intel_display_power_domain_set *power_domain_set) 3857 { 3858 struct intel_display *display = to_intel_display(crtc); 3859 unsigned long enabled_transcoders; 3860 u32 tmp; 3861 3862 enabled_transcoders = hsw_enabled_transcoders(crtc); 3863 if (!enabled_transcoders) 3864 return false; 3865 3866 assert_enabled_transcoders(display, enabled_transcoders); 3867 3868 /* 3869 * With the exception of DSI we should only ever have 3870 * a single enabled transcoder. With DSI let's just 3871 * pick the first one. 
	 */
	pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;

	/* Grab (and keep) the transcoder power domain for the readout. */
	if (!intel_display_power_get_in_set_if_enabled(display, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	if (hsw_panel_transcoders(display) & BIT(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(display,
				    TRANS_DDI_FUNC_CTL(display, pipe_config->cpu_transcoder));

		/*
		 * A_ONOFF routing means the panel transcoder is wired
		 * through the pipe A panel fitter ("force thru" path).
		 */
		if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
			pipe_config->pch_pfit.force_thru = true;
	}

	tmp = intel_de_read(display,
			    TRANSCONF(display, pipe_config->cpu_transcoder));

	return tmp & TRANSCONF_ENABLE;
}

/*
 * BXT/GLK DSI readout counterpart of hsw_get_transcoder_state():
 * figure out whether a DSI transcoder (port A or C) is driving @crtc.
 * Returns true iff pipe_config->cpu_transcoder was set to a DSI
 * transcoder. Acquired power domain references are left in
 * @power_domain_set for the caller to release.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct intel_display *display = to_intel_display(crtc);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(display, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(display))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(display, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip DSI ports routed to some other pipe. */
		tmp = intel_de_read(display, MIPI_CTRL(display, port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}

/*
 * Read out the joiner pipe bitmask for this crtc. Left at 0 when the
 * crtc is not part of a joined (primary+secondary) pipe configuration.
 */
static void intel_joiner_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	u8 primary_pipe, secondary_pipes;
	enum pipe pipe = crtc->pipe;

	enabled_joiner_pipes(display, pipe, &primary_pipe, &secondary_pipes);

	if (((primary_pipe | secondary_pipes) & BIT(pipe)) == 0)
		return;

	crtc_state->joiner_pipes = primary_pipe | secondary_pipes;
}

/*
 * HSW+ hardware state readout for one pipe. Populates @pipe_config
 * from the registers and returns whether the pipe is active. All
 * power domain references taken during readout are collected in
 * crtc->hw_readout_power_domains and dropped before returning.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(crtc);
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(display, &crtc->hw_readout_power_domains,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains);

	/* A pipe must not show up as driven by both DDI and DSI. */
	if ((display->platform.geminilake || display->platform.broxton) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) {
		drm_WARN_ON(display->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	intel_joiner_get_config(pipe_config);
	intel_dsc_get_config(pipe_config);

	/* intel_vrr_get_config() depends on .framestart_delay */
	if
(!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));

		/* Register field is delay-1, hence the +1. */
		pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
	} else {
		/* no idea if this is correct */
		pipe_config->framestart_delay = 1;
	}

	/*
	 * intel_vrr_get_config() depends on TRANS_SET_CONTEXT_LATENCY
	 * readout done by intel_get_transcoder_timings().
	 */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(display) >= 11)
		intel_get_transcoder_timings(crtc, pipe_config);

	if (transcoder_has_vrr(pipe_config))
		intel_vrr_get_config(pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	/* Output format: HSW reads TRANSCONF, BDW+ uses PIPE_MISC. */
	if (display->platform.haswell) {
		u32 tmp = intel_de_read(display,
					TRANSCONF(display, pipe_config->cpu_transcoder));

		if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipe_misc_output_format(crtc);
	}

	pipe_config->sink_format = pipe_config->output_format;

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(display, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (display->platform.broadwell || display->platform.haswell)
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* Panel fitter / scaler state needs its own power domain. */
	if (intel_display_power_get_in_set_if_enabled(display, &crtc->hw_readout_power_domains,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(display) >= 9)
			skl_scaler_get_config(pipe_config);
		else
			ilk_pfit_get_config(pipe_config);
	}

	hsw_ips_get_config(pipe_config);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		/* TRANS_MULT stores multiplier-1. */
		pipe_config->pixel_multiplier =
			intel_de_read(display,
				      TRANS_MULT(display, pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	intel_display_power_put_all_in_set(display, &crtc->hw_readout_power_domains);

	return active;
}

/*
 * Platform-independent entry point for pipe HW state readout:
 * dispatch to the platform hook, then fill in the derived software
 * state. Returns false when the pipe is not active.
 */
bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!display->funcs.display->get_pipe_config(crtc, crtc_state))
		return false;

	crtc_state->hw.active = true;

	intel_crtc_readout_derived_state(crtc_state);

	return true;
}

/*
 * Convert a DP link frequency (10 kHz units) and M/N values into a
 * pixel clock (kHz). Returns 0 when link_n is 0 (no valid M/N).
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock -> pixel clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and for link freq (10kbs units) -> pixel clock it is:
	 * link_symbol_clock = link_freq * 10 / link_symbol_size
	 * pixel_clock = (m * link_symbol_clock) / n
	 * or for more precision:
	 * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size)
	 */

	if (!m_n->link_n)
		return 0;

	return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10),
				m_n->link_n * intel_dp_link_symbol_size(link_freq));
}

/* Derive the pipe pixel clock (kHz) from the port clock and output type. */
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
{
	int dotclock;

	if (intel_crtc_has_dp_encoder(pipe_config))
		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
						    &pipe_config->dp_m_n);
	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
		/* Deep color HDMI: port clock scales with bpp vs. 24bpp. */
		dotclock =
DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
				    pipe_config->pipe_bpp);
	else
		dotclock = pipe_config->port_clock;

	/* 4:2:0 on non-DP outputs runs the port at half the pixel rate. */
	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
	    !intel_crtc_has_dp_encoder(pipe_config))
		dotclock *= 2;

	if (pipe_config->pixel_multiplier)
		dotclock /= pipe_config->pixel_multiplier;

	return dotclock;
}

/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	struct intel_crtc *crtc;
	enum pipe pipe;

	/* Encoder off -> no current mode. */
	if (!encoder->get_hw_state(encoder, &pipe))
		return NULL;

	crtc = intel_crtc_for_pipe(display, pipe);

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(mode);
		return NULL;
	}

	/* Read the HW state into the temporary crtc state. */
	if (!intel_crtc_get_pipe_config(crtc_state)) {
		intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
		kfree(mode);
		return NULL;
	}

	intel_encoder_get_config(encoder, crtc_state);

	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);

	intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);

	/* Caller owns (and must kfree) the returned mode. */
	return mode;
}

/* Can encoders @a and @b share a crtc (be cloned)? */
static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & BIT(b->type) &&
			  b->cloneable & BIT(a->type));
}

/*
 * Check that @encoder may be cloned with every other encoder
 * assigned to @crtc in this atomic state.
 */
static bool check_single_encoder_cloning(struct intel_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

/*
 * Linetime watermark (pre-SKL flavour): time to output one line,
 * in 1/8 us units, clamped to the 9-bit register field.
 */
static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					pipe_mode->crtc_clock);

	return min(linetime_wm, 0x1ff);
}

/* IPS linetime watermark: same formula, but based on the logical cdclk. */
static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
			       const struct intel_cdclk_state *cdclk_state)
{
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					intel_cdclk_logical(cdclk_state));

	return min(linetime_wm, 0x1ff);
}

/* SKL+ linetime watermark, based on the pipe pixel rate. */
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
				   crtc_state->pixel_rate);

	/* Display WA #1135: BXT:ALL GLK:ALL */
	if ((display->platform.geminilake || display->platform.broxton) &&
	    skl_watermark_ipc_enabled(display))
		linetime_wm /= 2;

	return min(linetime_wm, 0x1ff);
}

/*
 * Compute the linetime (and, where supported, IPS linetime)
 * watermarks for @crtc's new state.
 */
static int hsw_compute_linetime_wm(struct intel_atomic_state
*state,
				   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_cdclk_state *cdclk_state;

	if (DISPLAY_VER(display) >= 9)
		crtc_state->linetime = skl_linetime_wm(crtc_state);
	else
		crtc_state->linetime = hsw_linetime_wm(crtc_state);

	/* IPS linetime only matters where IPS is supported. */
	if (!hsw_crtc_supports_ips(crtc))
		return 0;

	/* May return -EDEADLK and trigger an atomic retry. */
	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
						       cdclk_state);

	return 0;
}

/*
 * Per-crtc atomic check: validate and compute the derived crtc state
 * (DPLL, color management, watermarks, scalers, IPS, PSR2 selective
 * fetch) for @crtc. Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	/* Old pre-ILK platforms need a post-disable watermark update. */
	if (DISPLAY_VER(display) < 5 && !display->platform.g4x &&
	    intel_crtc_needs_modeset(crtc_state) &&
	    !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	if (intel_crtc_needs_modeset(crtc_state)) {
		ret = intel_dpll_crtc_get_dpll(state, crtc);
		if (ret)
			return ret;
	}

	ret = intel_color_check(state, crtc);
	if (ret)
		return ret;

	ret = intel_wm_compute(state, crtc);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] watermarks are invalid\n",
			    crtc->base.base.id, crtc->base.name);
		return ret;
	}

	ret = intel_casf_compute_config(crtc_state);
	if (ret)
		return ret;

	if (DISPLAY_VER(display) >= 9) {
		if (intel_crtc_needs_modeset(crtc_state) ||
		    intel_crtc_needs_fastset(crtc_state) ||
		    intel_casf_needs_scaler(crtc_state)) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(state, crtc);
		if (ret)
			return ret;
	}

	if (HAS_IPS(display)) {
		ret = hsw_ips_compute_config(state, crtc);
		if (ret)
			return ret;
	}

	if (DISPLAY_VER(display) >= 9 ||
	    display->platform.broadwell || display->platform.haswell) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	ret = intel_psr2_sel_fetch_update(state, crtc);
	if (ret)
		return ret;

	return 0;
}

/*
 * Clamp crtc_state->pipe_bpp to what the connector can take, based
 * on the connector's max_bpc property. Returns 0 on success,
 * -EINVAL on an unexpected max_bpc value.
 */
static int
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
		      struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct drm_connector *connector = conn_state->connector;
	const struct drm_display_info *info = &connector->display_info;
	int bpp;

	/* Round max_bpc down to a supported bpc, then bpp = 3 * bpc. */
	switch (conn_state->max_bpc) {
	case 6 ... 7:
		bpp = 6 * 3;
		break;
	case 8 ... 9:
		bpp = 8 * 3;
		break;
	case 10 ... 11:
		bpp = 10 * 3;
		break;
	case 12 ...
16:
		bpp = 12 * 3;
		break;
	default:
		MISSING_CASE(conn_state->max_bpc);
		return -EINVAL;
	}

	if (bpp < crtc_state->pipe_bpp) {
		drm_dbg_kms(display->drm,
			    "[CONNECTOR:%d:%s] Limiting display bpp to %d "
			    "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
			    connector->base.id, connector->name,
			    bpp, 3 * info->bpc,
			    3 * conn_state->max_requested_bpc,
			    crtc_state->pipe_bpp);

		crtc_state->pipe_bpp = bpp;
	}

	return 0;
}

/* Minimum supported pipe bpp: 6 bpc * 3 components. */
int intel_display_min_pipe_bpp(void)
{
	return 6 * 3;
}

/* Maximum supported pipe bpp for the given platform. */
int intel_display_max_pipe_bpp(struct intel_display *display)
{
	if (display->platform.g4x || display->platform.valleyview ||
	    display->platform.cherryview)
		return 10*3;
	else if (DISPLAY_VER(display) >= 5)
		return 12*3;
	else
		return 8*3;
}

/*
 * Initialize crtc_state->pipe_bpp to the platform maximum, then
 * clamp it against every connector assigned to @crtc.
 */
static int
compute_baseline_pipe_bpp(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	crtc_state->pipe_bpp = intel_display_max_pipe_bpp(display);

	/* Clamp display bpp to connector max bpp */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		int ret;

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = compute_sink_pipe_bpp(connector_state, crtc_state);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Verify that no digital port is used by more than one encoder, and
 * that MST and SST/HDMI are not mixed on the same port. Returns
 * false on any conflict.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&display->drm->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(display->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state; fall back to the current one. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		drm_WARN_ON(display->drm, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(display->drm, !HAS_DDI(display)))
				break;
			fallthrough;
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}

/*
 * Copy the color management blobs from the uapi state into the hw
 * state. Used both on modeset and on the nomodeset (fastset) path.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
					   struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* Joiner secondaries get their hw state from the primary instead. */
	WARN_ON(intel_crtc_is_joiner_secondary(crtc_state));

	drm_property_replace_blob(&crtc_state->hw.degamma_lut,
				  crtc_state->uapi.degamma_lut);
	drm_property_replace_blob(&crtc_state->hw.gamma_lut,
				  crtc_state->uapi.gamma_lut);
	drm_property_replace_blob(&crtc_state->hw.ctm,
				  crtc_state->uapi.ctm);
}

/*
 * On a full modeset, (re)seed the hw crtc state (enable/active,
 * modes, scaling filter and color blobs) from the uapi state.
 */
static void
intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* Joiner secondaries copy from the primary, not from uapi. */
	WARN_ON(intel_crtc_is_joiner_secondary(crtc_state));

	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	drm_mode_copy(&crtc_state->hw.mode,
		      &crtc_state->uapi.mode);
	drm_mode_copy(&crtc_state->hw.adjusted_mode,
		      &crtc_state->uapi.adjusted_mode);
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
}

/*
 * Fastset path for a joiner secondary: mirror the primary crtc's
 * color management state into the secondary's hw state.
 */
static void
copy_joiner_crtc_state_nomodeset(struct intel_atomic_state *state,
				 struct intel_crtc *secondary_crtc)
{
	struct intel_crtc_state *secondary_crtc_state =
		intel_atomic_get_new_crtc_state(state, secondary_crtc);
	struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state);
	const struct intel_crtc_state *primary_crtc_state =
		intel_atomic_get_new_crtc_state(state, primary_crtc);

	drm_property_replace_blob(&secondary_crtc_state->hw.degamma_lut,
				  primary_crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&secondary_crtc_state->hw.gamma_lut,
				  primary_crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&secondary_crtc_state->hw.ctm,
				  primary_crtc_state->hw.ctm);

	secondary_crtc_state->uapi.color_mgmt_changed = primary_crtc_state->uapi.color_mgmt_changed;
}

/*
 * Modeset path for a joiner secondary: clone the primary crtc's
 * state wholesale, while preserving the secondary-specific bits
 * (uapi state, scaler state, DPLL, CRC). Returns -ENOMEM on
 * allocation failure.
 */
static int
copy_joiner_crtc_state_modeset(struct intel_atomic_state *state,
			       struct intel_crtc *secondary_crtc)
{
	struct intel_crtc_state *secondary_crtc_state =
		intel_atomic_get_new_crtc_state(state, secondary_crtc);
	struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state);
	const struct intel_crtc_state *primary_crtc_state =
		intel_atomic_get_new_crtc_state(state, primary_crtc);
	struct intel_crtc_state *saved_state;

	WARN_ON(primary_crtc_state->joiner_pipes !=
		secondary_crtc_state->joiner_pipes);

	saved_state = kmemdup(primary_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* preserve some things from the slave's original crtc state */
	saved_state->uapi = secondary_crtc_state->uapi;
	saved_state->scaler_state = secondary_crtc_state->scaler_state;
	saved_state->intel_dpll = secondary_crtc_state->intel_dpll;
	saved_state->crc_enabled = secondary_crtc_state->crc_enabled;

	/* Release resources owned by the old secondary state before overwrite. */
	intel_crtc_free_hw_state(secondary_crtc_state);
	if (secondary_crtc_state->dp_tunnel_ref.tunnel)
		drm_dp_tunnel_ref_put(&secondary_crtc_state->dp_tunnel_ref);
	memcpy(secondary_crtc_state, saved_state, sizeof(*secondary_crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&secondary_crtc_state->hw, 0, sizeof(secondary_crtc_state->hw));
	secondary_crtc_state->hw.enable = primary_crtc_state->hw.enable;
	secondary_crtc_state->hw.active = primary_crtc_state->hw.active;
	drm_mode_copy(&secondary_crtc_state->hw.mode,
		      &primary_crtc_state->hw.mode);
	drm_mode_copy(&secondary_crtc_state->hw.pipe_mode,
		      &primary_crtc_state->hw.pipe_mode);
	drm_mode_copy(&secondary_crtc_state->hw.adjusted_mode,
		      &primary_crtc_state->hw.adjusted_mode);
	secondary_crtc_state->hw.scaling_filter = primary_crtc_state->hw.scaling_filter;

	/* The memcpy above copied the primary's tunnel pointer; take our own ref. */
	if (primary_crtc_state->dp_tunnel_ref.tunnel)
		drm_dp_tunnel_ref_get(primary_crtc_state->dp_tunnel_ref.tunnel,
				      &secondary_crtc_state->dp_tunnel_ref);

	copy_joiner_crtc_state_nomodeset(state, secondary_crtc);

	secondary_crtc_state->uapi.mode_changed = primary_crtc_state->uapi.mode_changed;
	secondary_crtc_state->uapi.connectors_changed = primary_crtc_state->uapi.connectors_changed;
	secondary_crtc_state->uapi.active_changed = primary_crtc_state->uapi.active_changed;

	WARN_ON(primary_crtc_state->joiner_pipes !=
		secondary_crtc_state->joiner_pipes);

	return 0;
}

/*
 * Reset the crtc state to a mostly-clean slate before a modeset
 * recomputation, preserving only the fields that must survive
 * (uapi state, DPLL selection, CRC, and VLV/CHV/g4x watermarks).
 * Returns -ENOMEM on allocation failure.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved.
	 */

	saved_state->uapi = crtc_state->uapi;
	saved_state->inherited = crtc_state->inherited;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->intel_dpll = crtc_state->intel_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* VLV/CHV/g4x watermarks carry active state that must survive. */
	if (display->platform.g4x ||
	    display->platform.valleyview || display->platform.cherryview)
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);

	return 0;
}

/*
 * Compute the full pipe configuration for a modeset on @crtc:
 * baseline bpp, link bw limits, pipe source size, output types,
 * and the encoder + crtc compute_config() hooks. Returns 0 on
 * success, -EDEADLK to trigger an atomic retry, or another
 * negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  const struct intel_link_bw_limits *limits)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int pipe_src_w, pipe_src_h;
	int base_bpp, ret, i;

	/* Default 1:1 pipe -> transcoder mapping; encoders may override. */
	crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;

	crtc_state->framestart_delay = 1;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(state, crtc);
	if (ret)
		return ret;

	/* Apply the per-pipe link bandwidth limits computed by the caller. */
	crtc_state->dsc.compression_enabled_on_link = limits->link_dsc_pipes & BIT(crtc->pipe);
	crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];

	if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n",
			    crtc->base.base.id, crtc->base.name,
			    FXP_Q4_ARGS(crtc_state->max_link_bpp_x16));
		crtc_state->bw_constrained = true;
	}

	base_bpp = crtc_state->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&crtc_state->hw.mode,
			       &pipe_src_w, &pipe_src_h);
	drm_rect_init(&crtc_state->pipe_src, 0, 0,
		      pipe_src_w, pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		if (!check_single_encoder_cloning(state, crtc, encoder)) {
			drm_dbg_kms(display->drm,
				    "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
				    encoder->base.base.id, encoder->base.name);
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			crtc_state->output_types |=
				BIT(encoder->compute_output_type(encoder, crtc_state,
								 connector_state));
		else
			crtc_state->output_types |= BIT(encoder->type);
	}

	/* Ensure the port clock defaults are reset when retrying. */
	crtc_state->port_clock = 0;
	crtc_state->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = encoder->compute_config(encoder, crtc_state,
					      connector_state);
		/* -EDEADLK is propagated silently: it means "retry the commit". */
		if (ret == -EDEADLK)
			return ret;
		if (ret < 0) {
			drm_dbg_kms(display->drm, "[ENCODER:%d:%s] config failure: %d\n",
				    encoder->base.base.id, encoder->base.name, ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!crtc_state->port_clock)
		crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
			* crtc_state->pixel_multiplier;

	ret = intel_crtc_compute_config(state, crtc);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] config failure: %d\n",
			    crtc->base.base.id, crtc->base.name, ret);
		return ret;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
		!crtc_state->dither_force_disable;
	drm_dbg_kms(display->drm,
		    "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    crtc->base.base.id, crtc->base.name,
		    base_bpp, crtc_state->pipe_bpp, crtc_state->dither);

	return 0;
}

/*
 * Run the optional encoder compute_config_late() hooks for every
 * encoder assigned to @crtc, after the main config computation.
 */
static int
intel_modeset_pipe_config_late(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(&state->base, connector,
					conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);
		int ret;

		if (conn_state->crtc != &crtc->base ||
		    !encoder->compute_config_late)
			continue;

		ret = encoder->compute_config_late(encoder, crtc_state,
						   conn_state);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Compare two clock values, allowing a relative deviation of
 * roughly 5% (the check below is |diff| / avg < 5%, with the
 * sums kept in integer arithmetic). Zero only matches zero.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
		return true;

	return false;
}

/* Field-wise equality of two DP link M/N configurations. */
static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
		       const struct intel_link_m_n *m2_n2)
{
	return m_n->tu == m2_n2->tu &&
		m_n->data_m == m2_n2->data_m &&
		m_n->data_n == m2_n2->data_n &&
		m_n->link_m == m2_n2->link_m &&
		m_n->link_n == m2_n2->link_n;
}

/* Byte-wise equality of two HDMI infoframes. */
static bool
intel_compare_infoframe(const union hdmi_infoframe *a,
			const union hdmi_infoframe *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

/* Equality of the DP VSC SDP fields the driver cares about. */
static bool
intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
			 const struct drm_dp_vsc_sdp *b)
{
	return a->pixelformat == b->pixelformat &&
		a->colorimetry == b->colorimetry &&
		a->bpc == b->bpc &&
		a->dynamic_range == b->dynamic_range &&
		a->content_type == b->content_type;
}

/* Equality of the DP Adaptive Sync SDP fields the driver cares about. */
static bool
intel_compare_dp_as_sdp(const struct drm_dp_as_sdp *a,
			const struct drm_dp_as_sdp *b)
{
	return a->vtotal == b->vtotal &&
		a->target_rr == b->target_rr &&
		a->duration_incr_ms == b->duration_incr_ms &&
		a->duration_decr_ms == b->duration_decr_ms &&
		a->mode == b->mode;
}

/* Byte-wise equality of two buffers of length @len. */
static bool
intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
{
	return memcmp(a, b, len) == 0;
}

/*
 * Log a crtc state mismatch found during state verification.
 * Fastset failures are phrased differently from plain mismatches.
 */
static void __printf(5, 6)
pipe_config_mismatch(struct drm_printer *p, bool fastset,
		     const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_printf(p, "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
			   crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_printf(p, "[CRTC:%d:%s] mismatch in %s %pV\n",
			   crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}

/* Dump an expected-vs-found HDMI infoframe mismatch. */
static void
pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
			       const struct intel_crtc *crtc,
			       const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	struct intel_display *display = to_intel_display(crtc);
	const char *loglevel;

	/* Fastset mismatches are only interesting at debug log level. */
	if (fastset) {
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		loglevel = KERN_DEBUG;
	} else {
		loglevel = KERN_ERR;
	}

	pipe_config_mismatch(p, fastset, crtc, name, "infoframe");

	drm_printf(p, "expected:\n");
	hdmi_infoframe_log(loglevel, display->drm->dev, a);
	drm_printf(p, "found:\n");
	hdmi_infoframe_log(loglevel, display->drm->dev, b);
}

/* Dump an expected-vs-found DP VSC SDP mismatch. */
static void
pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
				const struct intel_crtc *crtc,
				const char *name,
				const struct drm_dp_vsc_sdp *a,
				const struct drm_dp_vsc_sdp *b)
{
	pipe_config_mismatch(p, fastset, crtc, name, "dp vsc sdp");

	drm_printf(p, "expected:\n");
	drm_dp_vsc_sdp_log(p, a);
	drm_printf(p, "found:\n");
	drm_dp_vsc_sdp_log(p, b);
}

/* Dump an expected-vs-found DP Adaptive Sync SDP mismatch. */
static void
pipe_config_dp_as_sdp_mismatch(struct drm_printer *p, bool fastset,
			       const struct intel_crtc *crtc,
			       const char *name,
			       const struct drm_dp_as_sdp *a,
			       const struct drm_dp_as_sdp *b)
{
	pipe_config_mismatch(p, fastset, crtc, name, "dp as sdp");

	drm_printf(p, "expected:\n");
	drm_dp_as_sdp_log(p, a);
	drm_printf(p, "found:\n");
	drm_dp_as_sdp_log(p, b);
}

/* Returns the length up to and including the last differing byte */
static size_t
memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (a[i] != b[i])
			return i + 1;
	}

	return 0;
}

/* Hex-dump an expected-vs-found buffer mismatch. */
static void
pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset,
			    const struct intel_crtc *crtc,
			    const char *name,
			    const u8 *a, const u8 *b, size_t len)
{
	pipe_config_mismatch(p, fastset, crtc, name, "buffer");

	/* only dump up to the last difference */
	len = memcmp_diff_len(a, b, len);

	drm_print_hex_dump(p, "expected: ", a, len);
	drm_print_hex_dump(p, "found: ", b, len);
}

static void
pipe_config_pll_mismatch(struct drm_printer *p, bool fastset,
			 const struct intel_crtc *crtc,
			 const char *name,
			 const struct intel_dpll_hw_state *a,
			 const struct intel_dpll_hw_state *b)
{
struct intel_display *display = to_intel_display(crtc); 4995 4996 pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */ 4997 4998 drm_printf(p, "expected:\n"); 4999 intel_dpll_dump_hw_state(display, p, a); 5000 drm_printf(p, "found:\n"); 5001 intel_dpll_dump_hw_state(display, p, b); 5002 } 5003 5004 static bool allow_vblank_delay_fastset(const struct intel_crtc_state *old_crtc_state) 5005 { 5006 struct intel_display *display = to_intel_display(old_crtc_state); 5007 5008 /* 5009 * Allow fastboot to fix up vblank delay (handled via LRR 5010 * codepaths), a bit dodgy as the registers aren't 5011 * double buffered but seems to be working more or less... 5012 * 5013 * Also allow this when the VRR timing generator is always on, 5014 * and optimized guardband is used. In such cases, 5015 * vblank delay may vary even without inherited state, but it's 5016 * still safe as VRR guardband is still same. 5017 */ 5018 return HAS_LRR(display) && 5019 (old_crtc_state->inherited || intel_vrr_always_use_vrr_tg(display)) && 5020 !intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI); 5021 } 5022 5023 static void 5024 pipe_config_lt_phy_pll_mismatch(struct drm_printer *p, bool fastset, 5025 const struct intel_crtc *crtc, 5026 const char *name, 5027 const struct intel_lt_phy_pll_state *a, 5028 const struct intel_lt_phy_pll_state *b) 5029 { 5030 struct intel_display *display = to_intel_display(crtc); 5031 char *chipname = "LTPHY"; 5032 5033 pipe_config_mismatch(p, fastset, crtc, name, chipname); 5034 5035 drm_printf(p, "expected:\n"); 5036 intel_lt_phy_dump_hw_state(display, a); 5037 drm_printf(p, "found:\n"); 5038 intel_lt_phy_dump_hw_state(display, b); 5039 } 5040 5041 bool 5042 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 5043 const struct intel_crtc_state *pipe_config, 5044 bool fastset) 5045 { 5046 struct intel_display *display = to_intel_display(current_config); 5047 struct intel_crtc *crtc = 
		to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_printer p;
	u32 exclude_infoframes = 0;
	bool ret = true;

	if (fastset)
		p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
	else
		p = drm_err_printer(display->drm, NULL);

/*
 * Helper macros: each compares one field of current_config vs. pipe_config,
 * reports via pipe_config_mismatch() on difference, and clears 'ret'.
 * The BUILD_BUG_ON_MSG checks enforce the correct macro per field type.
 */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_LLI(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected %lli, found %lli)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
				 __stringify(name) " is not bool"); \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected %s, found %s)", \
				     str_yes_no(current_config->name), \
				     str_yes_no(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name)) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(expected tu %i data %i/%i link %i/%i, " \
				     "found tu %i, data %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.data_m, \
				     current_config->name.data_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.data_m, \
				     pipe_config->name.data_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_PLL(name) do { \
	if (!intel_dpll_compare_hw_state(display, &current_config->name, \
					 &pipe_config->name)) { \
		pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \
					 &current_config->name, \
					 &pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_PLL_LT(name) do { \
	if (!intel_lt_phy_pll_compare_hw_state(&current_config->name, \
					       &pipe_config->name)) { \
		pipe_config_lt_phy_pll_mismatch(&p, fastset, crtc, __stringify(name), \
						&current_config->name, \
						&pipe_config->name); \
		ret = false; \
	} \
} while (0)

/*
 * Mode timings; vblank start and vtotal/vblank end are conditionally
 * skipped during fastset when the vblank delay / LRR fastpaths may
 * legitimately change them.
 */
#define PIPE_CONF_CHECK_TIMINGS(name) do { \
	PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
	PIPE_CONF_CHECK_I(name.crtc_htotal); \
	PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
	PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
	PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
	PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
	PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
	if (!fastset || !allow_vblank_delay_fastset(current_config)) \
		PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
	PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
	PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
	if (!fastset || !pipe_config->update_lrr) { \
		PIPE_CONF_CHECK_I(name.crtc_vtotal); \
		PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
	} \
} while (0)

#define PIPE_CONF_CHECK_RECT(name) do { \
	PIPE_CONF_CHECK_I(name.x1); \
	PIPE_CONF_CHECK_I(name.x2); \
	PIPE_CONF_CHECK_I(name.y1); \
	PIPE_CONF_CHECK_I(name.y2); \
} while (0)

#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(&p, fastset, crtc, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
	if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
				      &pipe_config->infoframes.name)) { \
		pipe_config_dp_vsc_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
						&current_config->infoframes.name, \
						&pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \
	if (!intel_compare_dp_as_sdp(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_dp_as_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_BUFFER(name, len) do { \
	BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
	BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
	if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
		pipe_config_buffer_mismatch(&p, fastset, crtc, __stringify(name), \
					    current_config->name, \
					    pipe_config->name, \
					    (len)); \
		ret = false; \
	} \
} while (0)

/* LUTs are only comparable when both states use the same gamma mode. */
#define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \
	if (current_config->gamma_mode == pipe_config->gamma_mode && \
	    !intel_color_lut_equal(current_config, \
				   current_config->lut, pipe_config->lut, \
				   is_pre_csc_lut)) { \
		pipe_config_mismatch(&p, fastset, crtc, __stringify(lut), \
				     "hw_state doesn't match sw_state"); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_CSC(name) do { \
	PIPE_CONF_CHECK_X(name.preoff[0]); \
	PIPE_CONF_CHECK_X(name.preoff[1]); \
	PIPE_CONF_CHECK_X(name.preoff[2]); \
	PIPE_CONF_CHECK_X(name.coeff[0]); \
	PIPE_CONF_CHECK_X(name.coeff[1]); \
	PIPE_CONF_CHECK_X(name.coeff[2]); \
	PIPE_CONF_CHECK_X(name.coeff[3]); \
	PIPE_CONF_CHECK_X(name.coeff[4]); \
	PIPE_CONF_CHECK_X(name.coeff[5]); \
	PIPE_CONF_CHECK_X(name.coeff[6]); \
	PIPE_CONF_CHECK_X(name.coeff[7]); \
	PIPE_CONF_CHECK_X(name.coeff[8]); \
	PIPE_CONF_CHECK_X(name.postoff[0]); \
	PIPE_CONF_CHECK_X(name.postoff[1]); \
	PIPE_CONF_CHECK_X(name.postoff[2]); \
} while (0)

/* True if either state carries the given quirk. */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_BOOL(hw.enable);
	PIPE_CONF_CHECK_BOOL(hw.active);

	PIPE_CONF_CHECK_I(cpu_transcoder);
	PIPE_CONF_CHECK_I(mst_master_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	PIPE_CONF_CHECK_I(min_hblank);

	if (HAS_DOUBLE_BUFFERED_M_N(display)) {
		/* fastset M/N updates make a dp_m_n difference acceptable */
		if (!fastset || !pipe_config->update_m_n)
			PIPE_CONF_CHECK_M_N(dp_m_n);
	} else {
		PIPE_CONF_CHECK_M_N(dp_m_n);
		PIPE_CONF_CHECK_M_N(dp_m2_n2);
	}

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(framestart_delay);
	PIPE_CONF_CHECK_I(msa_timing_delay);

	PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
	PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);

	PIPE_CONF_CHECK_I(pixel_multiplier);

	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((DISPLAY_VER(display) < 8 && !display->platform.haswell) ||
	    display->platform.valleyview || display->platform.cherryview)
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	PIPE_CONF_CHECK_BOOL(enhanced_framing);
	PIPE_CONF_CHECK_BOOL(fec_enable);

	if (!fastset) {
		PIPE_CONF_CHECK_BOOL(has_audio);
		PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (DISPLAY_VER(display) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	if (!fastset) {
		PIPE_CONF_CHECK_RECT(pipe_src);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		PIPE_CONF_CHECK_RECT(pch_pfit.dst);

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_I(pixel_rate);
		PIPE_CONF_CHECK_BOOL(hw.casf_params.casf_enable);
		PIPE_CONF_CHECK_I(hw.casf_params.win_size);
		PIPE_CONF_CHECK_I(hw.casf_params.strength);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (display->platform.cherryview)
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);
		PIPE_CONF_CHECK_BOOL(wgc_enable);

		PIPE_CONF_CHECK_I(linetime);
		PIPE_CONF_CHECK_I(ips_linetime);

		PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true);
		PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false);

		PIPE_CONF_CHECK_CSC(csc);
		PIPE_CONF_CHECK_CSC(output_csc);
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	if (display->dpll.mgr)
		PIPE_CONF_CHECK_P(intel_dpll);

	/* FIXME convert everything over the dpll_mgr */
	if (display->dpll.mgr || HAS_GMCH(display))
		PIPE_CONF_CHECK_PLL(dpll_hw_state);

	/* FIXME convert MTL+ platforms over to dpll_mgr */
	if (HAS_LT_PHY(display))
		PIPE_CONF_CHECK_PLL_LT(dpll_hw_state.ltpll);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (display->platform.g4x || DISPLAY_VER(display) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	if (!fastset || !pipe_config->update_m_n) {
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
	}
	PIPE_CONF_CHECK_I(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

	/*
	 * PSR toggles the VSC SDP, VRR toggles the Adaptive Sync SDP; mask
	 * those enable bits out of the comparison when either side uses them.
	 */
	if (current_config->has_psr || pipe_config->has_psr)
		exclude_infoframes |= intel_hdmi_infoframe_enable(DP_SDP_VSC);

	if (current_config->vrr.enable || pipe_config->vrr.enable)
		exclude_infoframes |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);

	PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, ~exclude_infoframes);
	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	if (!fastset) {
		PIPE_CONF_CHECK_INFOFRAME(drm);
		PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
	}
	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);

	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
	PIPE_CONF_CHECK_I(master_transcoder);
	PIPE_CONF_CHECK_X(joiner_pipes);

	PIPE_CONF_CHECK_BOOL(dsc.config.block_pred_enable);
	PIPE_CONF_CHECK_BOOL(dsc.config.convert_rgb);
	PIPE_CONF_CHECK_BOOL(dsc.config.simple_422);
	PIPE_CONF_CHECK_BOOL(dsc.config.native_422);
	PIPE_CONF_CHECK_BOOL(dsc.config.native_420);
	PIPE_CONF_CHECK_BOOL(dsc.config.vbr_enable);
	PIPE_CONF_CHECK_I(dsc.config.line_buf_depth);
	PIPE_CONF_CHECK_I(dsc.config.bits_per_component);
	PIPE_CONF_CHECK_I(dsc.config.pic_width);
	PIPE_CONF_CHECK_I(dsc.config.pic_height);
	PIPE_CONF_CHECK_I(dsc.config.slice_width);
	PIPE_CONF_CHECK_I(dsc.config.slice_height);
	PIPE_CONF_CHECK_I(dsc.config.initial_dec_delay);
	PIPE_CONF_CHECK_I(dsc.config.initial_xmit_delay);
	PIPE_CONF_CHECK_I(dsc.config.scale_decrement_interval);
	PIPE_CONF_CHECK_I(dsc.config.scale_increment_interval);
	PIPE_CONF_CHECK_I(dsc.config.initial_scale_value);
	PIPE_CONF_CHECK_I(dsc.config.first_line_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.flatness_min_qp);
	PIPE_CONF_CHECK_I(dsc.config.flatness_max_qp);
	PIPE_CONF_CHECK_I(dsc.config.slice_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.nfl_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.initial_offset);
	PIPE_CONF_CHECK_I(dsc.config.final_offset);
	PIPE_CONF_CHECK_I(dsc.config.rc_model_size);
	PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit0);
	PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit1);
	PIPE_CONF_CHECK_I(dsc.config.slice_chunk_size);
	PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset);
	PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);

	PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
	PIPE_CONF_CHECK_I(dsc.num_streams);
	PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);

	PIPE_CONF_CHECK_BOOL(splitter.enable);
	PIPE_CONF_CHECK_I(splitter.link_count);
	PIPE_CONF_CHECK_I(splitter.pixel_overlap);

	if (!fastset) {
		PIPE_CONF_CHECK_BOOL(vrr.enable);
		PIPE_CONF_CHECK_I(vrr.vmin);
		PIPE_CONF_CHECK_I(vrr.vmax);
		PIPE_CONF_CHECK_I(vrr.flipline);
		PIPE_CONF_CHECK_I(vrr.vsync_start);
		PIPE_CONF_CHECK_I(vrr.vsync_end);
		PIPE_CONF_CHECK_LLI(cmrr.cmrr_m);
		PIPE_CONF_CHECK_LLI(cmrr.cmrr_n);
		PIPE_CONF_CHECK_BOOL(cmrr.enable);
		PIPE_CONF_CHECK_I(vrr.dc_balance.vmin);
		PIPE_CONF_CHECK_I(vrr.dc_balance.vmax);
		PIPE_CONF_CHECK_I(vrr.dc_balance.guardband);
		PIPE_CONF_CHECK_I(vrr.dc_balance.slope);
		PIPE_CONF_CHECK_I(vrr.dc_balance.max_increase);
		PIPE_CONF_CHECK_I(vrr.dc_balance.max_decrease);
		PIPE_CONF_CHECK_I(vrr.dc_balance.vblank_target);
	}

	if (!fastset || intel_vrr_always_use_vrr_tg(display)) {
		PIPE_CONF_CHECK_I(vrr.pipeline_full);
		PIPE_CONF_CHECK_I(vrr.guardband);
	}

	PIPE_CONF_CHECK_I(set_context_latency);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_LLI
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_CHECK_TIMINGS
#undef PIPE_CONF_CHECK_RECT
#undef PIPE_CONF_QUIRK

	return ret;
}

/* Assert that every plane in the state is either visible or a Y plane. */
static void
intel_verify_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane,
					  plane_state, i)
		assert_plane(plane, plane_state->is_y_plane ||
			     plane_state->uapi.visible);
}

/*
 * Force a full modeset on one crtc: pull in its connectors, DP tunnel/MST
 * topology and plane state, then set uapi.mode_changed.
 */
static int intel_modeset_pipe(struct intel_atomic_state *state,
			      struct intel_crtc_state *crtc_state,
			      const char *reason)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int ret;

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
		    crtc->base.base.id, crtc->base.name, reason);

	ret = drm_atomic_add_affected_connectors(&state->base,
						 &crtc->base);
	if (ret)
		return ret;

	ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc);
	if (ret)
		return ret;

	ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
	if (ret)
		return ret;

	ret = intel_plane_add_affected(state, crtc);
	if (ret)
		return ret;

	crtc_state->uapi.mode_changed = true;

	return 0;
}

/**
 * intel_modeset_pipes_in_mask_early - force a full modeset on a set of pipes
 * @state: intel atomic state
 * @reason: the reason for the full modeset
 * @mask: mask of pipes to modeset
 *
 * Add pipes in @mask to @state and force a full modeset on the enabled ones
 * due to the description in @reason.
 * This function can be called only before new plane states are computed.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
				      const char *reason, u8 mask)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, mask) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* skip disabled pipes and those already doing a modeset */
		if (!crtc_state->hw.enable ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		ret = intel_modeset_pipe(state, crtc_state, reason);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Mark a crtc for full modeset and clear the fastset/partial-update flags,
 * which are mutually exclusive with a full modeset.
 */
static void
intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.mode_changed = true;

	crtc_state->update_pipe = false;
	crtc_state->update_m_n = false;
	crtc_state->update_lrr = false;
}

/**
 * intel_modeset_all_pipes_late - force a full modeset on all pipes
 * @state: intel atomic state
 * @reason: the reason for the full modeset
 *
 * Add all pipes to @state and force a full modeset on the active ones due to
 * the description in @reason.
 * This function can be called only after new plane states are computed already.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
				 const char *reason)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;

	for_each_intel_crtc(display->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		ret = intel_modeset_pipe(state, crtc_state, reason);
		if (ret)
			return ret;

		intel_crtc_flag_modeset(crtc_state);

		/* plane states already exist: force updates, cancel async flips */
		crtc_state->update_planes |= crtc_state->active_planes;
		crtc_state->async_flip_planes = 0;
		crtc_state->do_async_flip = false;
	}

	return 0;
}

/*
 * Build and commit an internal atomic state that re-commits the pipes in
 * @pipe_mask (via connectors_changed), e.g. to retrain links.
 */
int intel_modeset_commit_pipes(struct intel_display *display,
			       u8 pipe_mask,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct intel_crtc *crtc;
	int ret;

	state = drm_atomic_state_alloc(display->drm);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	to_intel_atomic_state(state)->internal = true;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		crtc_state->uapi.connectors_changed = true;
	}

	ret = drm_atomic_commit(state);
out:
	drm_atomic_state_put(state);

	return ret;
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

/*
 * Fold the per-crtc hw.enable bits from @state into @enabled_pipes and
 * return the updated mask.
 */
u8 intel_calc_enabled_pipes(struct intel_atomic_state *state,
			    u8 enabled_pipes)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->hw.enable)
			enabled_pipes |= BIT(crtc->pipe);
		else
			enabled_pipes &= ~BIT(crtc->pipe);
	}

	return enabled_pipes;
}

/*
 * Fold the per-crtc hw.active bits from @state into @active_pipes and
 * return the updated mask.
 */
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
			   u8 active_pipes)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);
		else
			active_pipes &= ~BIT(crtc->pipe);
	}

	return active_pipes;
}

static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);

	state->modeset = true;

	if (display->platform.haswell)
		return hsw_mode_set_planes_workaround(state);

	return 0;
}

/* True if any timing field relevant to LRR differs between the two states. */
static bool lrr_params_changed(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	const struct drm_display_mode *old_adjusted_mode = &old_crtc_state->hw.adjusted_mode;
	const struct drm_display_mode *new_adjusted_mode = &new_crtc_state->hw.adjusted_mode;

	return old_adjusted_mode->crtc_vblank_start != new_adjusted_mode->crtc_vblank_start ||
		old_adjusted_mode->crtc_vblank_end != new_adjusted_mode->crtc_vblank_end ||
		old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal ||
		old_crtc_state->set_context_latency != new_crtc_state->set_context_latency;
}

/*
 * Decide whether the old->new transition can be done as a fastset; clears
 * uapi.mode_changed when intel_pipe_config_compare() allows it, and trims
 * the update_m_n/update_lrr flags when those updates turn out unnecessary.
 */
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);

	/* only allow LRR when the timings stay within the VRR range */
	if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range)
		new_crtc_state->update_lrr = false;

	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) {
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
			    crtc->base.base.id, crtc->base.name);
	} else {
		if (allow_vblank_delay_fastset(old_crtc_state))
			new_crtc_state->update_lrr = true;
		new_crtc_state->uapi.mode_changed = false;
	}

	if (intel_compare_link_m_n(&old_crtc_state->dp_m_n,
				   &new_crtc_state->dp_m_n))
		new_crtc_state->update_m_n = false;

	if (!lrr_params_changed(old_crtc_state, new_crtc_state))
		new_crtc_state->update_lrr = false;

	if (intel_crtc_needs_modeset(new_crtc_state))
		intel_crtc_flag_modeset(new_crtc_state);
	else
		new_crtc_state->update_pipe = true;
}

/* Run intel_crtc_atomic_check() for every new crtc state in @state. */
static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state __maybe_unused *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret;

		ret = intel_crtc_atomic_check(state, crtc);
		if (ret) {
			drm_dbg_atomic(display->drm,
				       "[CRTC:%d:%s] atomic driver check failed\n",
				       crtc->base.base.id, crtc->base.name);
			return ret;
		}
	}

	return 0;
}

/*
 * True if any enabled crtc in @state that uses one of @transcoders needs a
 * full modeset.
 */
static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
					       u8 transcoders)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}

/* True if any enabled crtc in @state on one of @pipes needs a full modeset. */
static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
				     u8 pipes)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    pipes & BIT(crtc->pipe) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}

static int intel_atomic_check_joiner(struct intel_atomic_state *state,
				     struct intel_crtc *primary_crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *primary_crtc_state =
		intel_atomic_get_new_crtc_state(state, primary_crtc);
	struct intel_crtc *secondary_crtc;

	if (!primary_crtc_state->joiner_pipes)
		return 0;

	/* sanity check */
	if (drm_WARN_ON(display->drm,
			primary_crtc->pipe != joiner_primary_pipe(primary_crtc_state)))
		return -EINVAL;

	if (primary_crtc_state->joiner_pipes & ~joiner_pipes(display)) {
drm_dbg_kms(display->drm, 5905 "[CRTC:%d:%s] Cannot act as joiner primary " 5906 "(need 0x%x as pipes, only 0x%x possible)\n", 5907 primary_crtc->base.base.id, primary_crtc->base.name, 5908 primary_crtc_state->joiner_pipes, joiner_pipes(display)); 5909 return -EINVAL; 5910 } 5911 5912 for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc, 5913 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) { 5914 struct intel_crtc_state *secondary_crtc_state; 5915 int ret; 5916 5917 secondary_crtc_state = intel_atomic_get_crtc_state(&state->base, secondary_crtc); 5918 if (IS_ERR(secondary_crtc_state)) 5919 return PTR_ERR(secondary_crtc_state); 5920 5921 /* primary being enabled, secondary was already configured? */ 5922 if (secondary_crtc_state->uapi.enable) { 5923 drm_dbg_kms(display->drm, 5924 "[CRTC:%d:%s] secondary is enabled as normal CRTC, but " 5925 "[CRTC:%d:%s] claiming this CRTC for joiner.\n", 5926 secondary_crtc->base.base.id, secondary_crtc->base.name, 5927 primary_crtc->base.base.id, primary_crtc->base.name); 5928 return -EINVAL; 5929 } 5930 5931 /* 5932 * The state copy logic assumes the primary crtc gets processed 5933 * before the secondary crtc during the main compute_config loop. 5934 * This works because the crtcs are created in pipe order, 5935 * and the hardware requires primary pipe < secondary pipe as well. 5936 * Should that change we need to rethink the logic. 
5937 */ 5938 if (WARN_ON(drm_crtc_index(&primary_crtc->base) > 5939 drm_crtc_index(&secondary_crtc->base))) 5940 return -EINVAL; 5941 5942 drm_dbg_kms(display->drm, 5943 "[CRTC:%d:%s] Used as secondary for joiner primary [CRTC:%d:%s]\n", 5944 secondary_crtc->base.base.id, secondary_crtc->base.name, 5945 primary_crtc->base.base.id, primary_crtc->base.name); 5946 5947 secondary_crtc_state->joiner_pipes = 5948 primary_crtc_state->joiner_pipes; 5949 5950 ret = copy_joiner_crtc_state_modeset(state, secondary_crtc); 5951 if (ret) 5952 return ret; 5953 } 5954 5955 return 0; 5956 } 5957 5958 static void kill_joiner_secondaries(struct intel_atomic_state *state, 5959 struct intel_crtc *primary_crtc) 5960 { 5961 struct intel_display *display = to_intel_display(state); 5962 struct intel_crtc_state *primary_crtc_state = 5963 intel_atomic_get_new_crtc_state(state, primary_crtc); 5964 struct intel_crtc *secondary_crtc; 5965 5966 for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc, 5967 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) { 5968 struct intel_crtc_state *secondary_crtc_state = 5969 intel_atomic_get_new_crtc_state(state, secondary_crtc); 5970 5971 secondary_crtc_state->joiner_pipes = 0; 5972 5973 intel_crtc_copy_uapi_to_hw_state_modeset(state, secondary_crtc); 5974 } 5975 5976 primary_crtc_state->joiner_pipes = 0; 5977 } 5978 5979 /** 5980 * DOC: asynchronous flip implementation 5981 * 5982 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC 5983 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL. 5984 * Correspondingly, support is currently added for primary plane only. 5985 * 5986 * Async flip can only change the plane surface address, so anything else 5987 * changing is rejected from the intel_async_flip_check_hw() function. 5988 * Once this check is cleared, flip done interrupt is enabled using 5989 * the intel_crtc_enable_flip_done() function. 
5990 * 5991 * As soon as the surface address register is written, flip done interrupt is 5992 * generated and the requested events are sent to the userspace in the interrupt 5993 * handler itself. The timestamp and sequence sent during the flip done event 5994 * correspond to the last vblank and have no relation to the actual time when 5995 * the flip done event was sent. 5996 */ 5997 static int intel_async_flip_check_uapi(struct intel_atomic_state *state, 5998 struct intel_crtc *crtc) 5999 { 6000 struct intel_display *display = to_intel_display(state); 6001 const struct intel_crtc_state *new_crtc_state = 6002 intel_atomic_get_new_crtc_state(state, crtc); 6003 const struct intel_plane_state *old_plane_state; 6004 struct intel_plane_state *new_plane_state; 6005 struct intel_plane *plane; 6006 int i; 6007 6008 if (!new_crtc_state->uapi.async_flip) 6009 return 0; 6010 6011 if (!new_crtc_state->uapi.active) { 6012 drm_dbg_kms(display->drm, 6013 "[CRTC:%d:%s] not active\n", 6014 crtc->base.base.id, crtc->base.name); 6015 return -EINVAL; 6016 } 6017 6018 if (intel_crtc_needs_modeset(new_crtc_state)) { 6019 drm_dbg_kms(display->drm, 6020 "[CRTC:%d:%s] modeset required\n", 6021 crtc->base.base.id, crtc->base.name); 6022 return -EINVAL; 6023 } 6024 6025 /* 6026 * FIXME: joiner+async flip is busted currently. 6027 * Remove this check once the issues are fixed. 6028 */ 6029 if (new_crtc_state->joiner_pipes) { 6030 drm_dbg_kms(display->drm, 6031 "[CRTC:%d:%s] async flip disallowed with joiner\n", 6032 crtc->base.base.id, crtc->base.name); 6033 return -EINVAL; 6034 } 6035 6036 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 6037 new_plane_state, i) { 6038 if (plane->pipe != crtc->pipe) 6039 continue; 6040 6041 /* 6042 * TODO: Async flip is only supported through the page flip IOCTL 6043 * as of now. So support currently added for primary plane only. 
6044 * Support for other planes on platforms on which supports 6045 * this(vlv/chv and icl+) should be added when async flip is 6046 * enabled in the atomic IOCTL path. 6047 */ 6048 if (!plane->async_flip) { 6049 drm_dbg_kms(display->drm, 6050 "[PLANE:%d:%s] async flip not supported\n", 6051 plane->base.base.id, plane->base.name); 6052 return -EINVAL; 6053 } 6054 6055 if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) { 6056 drm_dbg_kms(display->drm, 6057 "[PLANE:%d:%s] no old or new framebuffer\n", 6058 plane->base.base.id, plane->base.name); 6059 return -EINVAL; 6060 } 6061 } 6062 6063 return 0; 6064 } 6065 6066 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc) 6067 { 6068 struct intel_display *display = to_intel_display(state); 6069 const struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6070 const struct intel_plane_state *new_plane_state, *old_plane_state; 6071 struct intel_plane *plane; 6072 int i; 6073 6074 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 6075 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6076 6077 if (!new_crtc_state->uapi.async_flip) 6078 return 0; 6079 6080 if (!new_crtc_state->hw.active) { 6081 drm_dbg_kms(display->drm, 6082 "[CRTC:%d:%s] not active\n", 6083 crtc->base.base.id, crtc->base.name); 6084 return -EINVAL; 6085 } 6086 6087 if (intel_crtc_needs_modeset(new_crtc_state)) { 6088 drm_dbg_kms(display->drm, 6089 "[CRTC:%d:%s] modeset required\n", 6090 crtc->base.base.id, crtc->base.name); 6091 return -EINVAL; 6092 } 6093 6094 if (old_crtc_state->active_planes != new_crtc_state->active_planes) { 6095 drm_dbg_kms(display->drm, 6096 "[CRTC:%d:%s] Active planes cannot be in async flip\n", 6097 crtc->base.base.id, crtc->base.name); 6098 return -EINVAL; 6099 } 6100 6101 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 6102 new_plane_state, i) { 6103 if (plane->pipe != crtc->pipe) 6104 continue; 6105 6106 /* 6107 * Only async flip 
capable planes should be in the state 6108 * if we're really about to ask the hardware to perform 6109 * an async flip. We should never get this far otherwise. 6110 */ 6111 if (drm_WARN_ON(display->drm, 6112 new_crtc_state->do_async_flip && !plane->async_flip)) 6113 return -EINVAL; 6114 6115 /* 6116 * Only check async flip capable planes other planes 6117 * may be involved in the initial commit due to 6118 * the wm0/ddb optimization. 6119 * 6120 * TODO maybe should track which planes actually 6121 * were requested to do the async flip... 6122 */ 6123 if (!plane->async_flip) 6124 continue; 6125 6126 if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->format, 6127 new_plane_state->hw.fb->modifier)) { 6128 drm_dbg_kms(display->drm, 6129 "[PLANE:%d:%s] pixel format %p4cc / modifier 0x%llx does not support async flip\n", 6130 plane->base.base.id, plane->base.name, 6131 &new_plane_state->hw.fb->format->format, 6132 new_plane_state->hw.fb->modifier); 6133 return -EINVAL; 6134 } 6135 6136 /* 6137 * We turn the first async flip request into a sync flip 6138 * so that we can reconfigure the plane (eg. change modifier). 
6139 */ 6140 if (!new_crtc_state->do_async_flip) 6141 continue; 6142 6143 if (old_plane_state->view.color_plane[0].mapping_stride != 6144 new_plane_state->view.color_plane[0].mapping_stride) { 6145 drm_dbg_kms(display->drm, 6146 "[PLANE:%d:%s] Stride cannot be changed in async flip\n", 6147 plane->base.base.id, plane->base.name); 6148 return -EINVAL; 6149 } 6150 6151 if (old_plane_state->hw.fb->modifier != 6152 new_plane_state->hw.fb->modifier) { 6153 drm_dbg_kms(display->drm, 6154 "[PLANE:%d:%s] Modifier cannot be changed in async flip\n", 6155 plane->base.base.id, plane->base.name); 6156 return -EINVAL; 6157 } 6158 6159 if (old_plane_state->hw.fb->format != 6160 new_plane_state->hw.fb->format) { 6161 drm_dbg_kms(display->drm, 6162 "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n", 6163 plane->base.base.id, plane->base.name); 6164 return -EINVAL; 6165 } 6166 6167 if (old_plane_state->hw.rotation != 6168 new_plane_state->hw.rotation) { 6169 drm_dbg_kms(display->drm, 6170 "[PLANE:%d:%s] Rotation cannot be changed in async flip\n", 6171 plane->base.base.id, plane->base.name); 6172 return -EINVAL; 6173 } 6174 6175 if (skl_plane_aux_dist(old_plane_state, 0) != 6176 skl_plane_aux_dist(new_plane_state, 0)) { 6177 drm_dbg_kms(display->drm, 6178 "[PLANE:%d:%s] AUX_DIST cannot be changed in async flip\n", 6179 plane->base.base.id, plane->base.name); 6180 return -EINVAL; 6181 } 6182 6183 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) || 6184 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) { 6185 drm_dbg_kms(display->drm, 6186 "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n", 6187 plane->base.base.id, plane->base.name); 6188 return -EINVAL; 6189 } 6190 6191 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) { 6192 drm_dbg_kms(display->drm, 6193 "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n", 6194 plane->base.base.id, plane->base.name); 6195 return -EINVAL; 6196 } 
6197 6198 if (old_plane_state->hw.pixel_blend_mode != 6199 new_plane_state->hw.pixel_blend_mode) { 6200 drm_dbg_kms(display->drm, 6201 "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n", 6202 plane->base.base.id, plane->base.name); 6203 return -EINVAL; 6204 } 6205 6206 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) { 6207 drm_dbg_kms(display->drm, 6208 "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n", 6209 plane->base.base.id, plane->base.name); 6210 return -EINVAL; 6211 } 6212 6213 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) { 6214 drm_dbg_kms(display->drm, 6215 "[PLANE:%d:%s] Color range cannot be changed in async flip\n", 6216 plane->base.base.id, plane->base.name); 6217 return -EINVAL; 6218 } 6219 6220 /* plane decryption is allow to change only in synchronous flips */ 6221 if (old_plane_state->decrypt != new_plane_state->decrypt) { 6222 drm_dbg_kms(display->drm, 6223 "[PLANE:%d:%s] Decryption cannot be changed in async flip\n", 6224 plane->base.base.id, plane->base.name); 6225 return -EINVAL; 6226 } 6227 } 6228 6229 return 0; 6230 } 6231 6232 static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state) 6233 { 6234 struct intel_display *display = to_intel_display(state); 6235 const struct intel_plane_state *plane_state; 6236 struct intel_crtc_state *crtc_state; 6237 struct intel_plane *plane; 6238 struct intel_crtc *crtc; 6239 u8 affected_pipes = 0; 6240 u8 modeset_pipes = 0; 6241 int i; 6242 6243 /* 6244 * Any plane which is in use by the joiner needs its crtc. 6245 * Pull those in first as this will not have happened yet 6246 * if the plane remains disabled according to uapi. 
6247 */ 6248 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 6249 crtc = to_intel_crtc(plane_state->hw.crtc); 6250 if (!crtc) 6251 continue; 6252 6253 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 6254 if (IS_ERR(crtc_state)) 6255 return PTR_ERR(crtc_state); 6256 } 6257 6258 /* Now pull in all joined crtcs */ 6259 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6260 affected_pipes |= crtc_state->joiner_pipes; 6261 if (intel_crtc_needs_modeset(crtc_state)) 6262 modeset_pipes |= crtc_state->joiner_pipes; 6263 } 6264 6265 for_each_intel_crtc_in_pipe_mask(display->drm, crtc, affected_pipes) { 6266 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 6267 if (IS_ERR(crtc_state)) 6268 return PTR_ERR(crtc_state); 6269 } 6270 6271 for_each_intel_crtc_in_pipe_mask(display->drm, crtc, modeset_pipes) { 6272 int ret; 6273 6274 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6275 6276 crtc_state->uapi.mode_changed = true; 6277 6278 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 6279 if (ret) 6280 return ret; 6281 6282 ret = intel_plane_add_affected(state, crtc); 6283 if (ret) 6284 return ret; 6285 } 6286 6287 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6288 /* Kill old joiner link, we may re-establish afterwards */ 6289 if (intel_crtc_needs_modeset(crtc_state) && 6290 intel_crtc_is_joiner_primary(crtc_state)) 6291 kill_joiner_secondaries(state, crtc); 6292 } 6293 6294 return 0; 6295 } 6296 6297 static int intel_atomic_check_config(struct intel_atomic_state *state, 6298 struct intel_link_bw_limits *limits, 6299 enum pipe *failed_pipe) 6300 { 6301 struct intel_display *display = to_intel_display(state); 6302 struct intel_crtc_state *new_crtc_state; 6303 struct intel_crtc *crtc; 6304 int ret; 6305 int i; 6306 6307 *failed_pipe = INVALID_PIPE; 6308 6309 ret = intel_joiner_add_affected_crtcs(state); 6310 if (ret) 6311 return ret; 6312 6313 ret = 
intel_fdi_add_affected_crtcs(state); 6314 if (ret) 6315 return ret; 6316 6317 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6318 if (!intel_crtc_needs_modeset(new_crtc_state)) { 6319 if (intel_crtc_is_joiner_secondary(new_crtc_state)) 6320 copy_joiner_crtc_state_nomodeset(state, crtc); 6321 else 6322 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 6323 continue; 6324 } 6325 6326 if (drm_WARN_ON(display->drm, intel_crtc_is_joiner_secondary(new_crtc_state))) 6327 continue; 6328 6329 ret = intel_crtc_prepare_cleared_state(state, crtc); 6330 if (ret) 6331 goto fail; 6332 6333 if (!new_crtc_state->hw.enable) 6334 continue; 6335 6336 ret = intel_modeset_pipe_config(state, crtc, limits); 6337 if (ret) 6338 goto fail; 6339 } 6340 6341 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6342 if (!intel_crtc_needs_modeset(new_crtc_state)) 6343 continue; 6344 6345 if (drm_WARN_ON(display->drm, intel_crtc_is_joiner_secondary(new_crtc_state))) 6346 continue; 6347 6348 if (!new_crtc_state->hw.enable) 6349 continue; 6350 6351 ret = intel_modeset_pipe_config_late(state, crtc); 6352 if (ret) 6353 goto fail; 6354 } 6355 6356 fail: 6357 if (ret) 6358 *failed_pipe = crtc->pipe; 6359 6360 return ret; 6361 } 6362 6363 static int intel_atomic_check_config_and_link(struct intel_atomic_state *state) 6364 { 6365 struct intel_link_bw_limits new_limits; 6366 struct intel_link_bw_limits old_limits; 6367 int ret; 6368 6369 intel_link_bw_init_limits(state, &new_limits); 6370 old_limits = new_limits; 6371 6372 while (true) { 6373 enum pipe failed_pipe; 6374 6375 ret = intel_atomic_check_config(state, &new_limits, 6376 &failed_pipe); 6377 if (ret) { 6378 /* 6379 * The bpp limit for a pipe is below the minimum it supports, set the 6380 * limit to the minimum and recalculate the config. 
6381 */ 6382 if (ret == -EINVAL && 6383 intel_link_bw_set_bpp_limit_for_pipe(state, 6384 &old_limits, 6385 &new_limits, 6386 failed_pipe)) 6387 continue; 6388 6389 break; 6390 } 6391 6392 old_limits = new_limits; 6393 6394 ret = intel_link_bw_atomic_check(state, &new_limits); 6395 if (ret != -EAGAIN) 6396 break; 6397 } 6398 6399 return ret; 6400 } 6401 /** 6402 * intel_atomic_check - validate state object 6403 * @dev: drm device 6404 * @_state: state to validate 6405 */ 6406 int intel_atomic_check(struct drm_device *dev, 6407 struct drm_atomic_state *_state) 6408 { 6409 struct intel_display *display = to_intel_display(dev); 6410 struct intel_atomic_state *state = to_intel_atomic_state(_state); 6411 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6412 struct intel_crtc *crtc; 6413 int ret, i; 6414 6415 if (!intel_display_driver_check_access(display)) 6416 return -ENODEV; 6417 6418 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6419 new_crtc_state, i) { 6420 /* 6421 * crtc's state no longer considered to be inherited 6422 * after the first userspace/client initiated commit. 
6423 */ 6424 if (!state->internal) 6425 new_crtc_state->inherited = false; 6426 6427 if (new_crtc_state->inherited != old_crtc_state->inherited) 6428 new_crtc_state->uapi.mode_changed = true; 6429 6430 if (new_crtc_state->uapi.scaling_filter != 6431 old_crtc_state->uapi.scaling_filter) 6432 new_crtc_state->uapi.mode_changed = true; 6433 } 6434 6435 intel_vrr_check_modeset(state); 6436 6437 ret = drm_atomic_helper_check_modeset(dev, &state->base); 6438 if (ret) 6439 goto fail; 6440 6441 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6442 ret = intel_async_flip_check_uapi(state, crtc); 6443 if (ret) 6444 return ret; 6445 } 6446 6447 ret = intel_atomic_check_config_and_link(state); 6448 if (ret) 6449 goto fail; 6450 6451 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6452 if (!intel_crtc_needs_modeset(new_crtc_state)) 6453 continue; 6454 6455 if (intel_crtc_is_joiner_secondary(new_crtc_state)) { 6456 drm_WARN_ON(display->drm, new_crtc_state->uapi.enable); 6457 continue; 6458 } 6459 6460 ret = intel_atomic_check_joiner(state, crtc); 6461 if (ret) 6462 goto fail; 6463 } 6464 6465 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6466 new_crtc_state, i) { 6467 if (!intel_crtc_needs_modeset(new_crtc_state)) 6468 continue; 6469 6470 intel_joiner_adjust_pipe_src(new_crtc_state); 6471 6472 intel_crtc_check_fastset(old_crtc_state, new_crtc_state); 6473 } 6474 6475 /** 6476 * Check if fastset is allowed by external dependencies like other 6477 * pipes and transcoders. 6478 * 6479 * Right now it only forces a fullmodeset when the MST master 6480 * transcoder did not changed but the pipe of the master transcoder 6481 * needs a fullmodeset so all slaves also needs to do a fullmodeset or 6482 * in case of port synced crtcs, if one of the synced crtcs 6483 * needs a full modeset, all other synced crtcs should be 6484 * forced a full modeset. 
6485 */ 6486 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6487 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state)) 6488 continue; 6489 6490 if (intel_dp_mst_crtc_needs_modeset(state, crtc)) 6491 intel_crtc_flag_modeset(new_crtc_state); 6492 6493 if (intel_dp_mst_is_slave_trans(new_crtc_state)) { 6494 enum transcoder master = new_crtc_state->mst_master_transcoder; 6495 6496 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) 6497 intel_crtc_flag_modeset(new_crtc_state); 6498 } 6499 6500 if (is_trans_port_sync_mode(new_crtc_state)) { 6501 u8 trans = new_crtc_state->sync_mode_slaves_mask; 6502 6503 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER) 6504 trans |= BIT(new_crtc_state->master_transcoder); 6505 6506 if (intel_cpu_transcoders_need_modeset(state, trans)) 6507 intel_crtc_flag_modeset(new_crtc_state); 6508 } 6509 6510 if (new_crtc_state->joiner_pipes) { 6511 if (intel_pipes_need_modeset(state, new_crtc_state->joiner_pipes)) 6512 intel_crtc_flag_modeset(new_crtc_state); 6513 } 6514 } 6515 6516 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6517 new_crtc_state, i) { 6518 if (!intel_crtc_needs_modeset(new_crtc_state)) 6519 continue; 6520 6521 intel_dpll_release(state, crtc); 6522 } 6523 6524 if (intel_any_crtc_needs_modeset(state) && !check_digital_port_conflicts(state)) { 6525 drm_dbg_kms(display->drm, "rejecting conflicting digital port configuration\n"); 6526 ret = -EINVAL; 6527 goto fail; 6528 } 6529 6530 ret = intel_plane_atomic_check(state); 6531 if (ret) 6532 goto fail; 6533 6534 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 6535 new_crtc_state->min_cdclk = intel_crtc_min_cdclk(new_crtc_state); 6536 6537 ret = intel_compute_global_watermarks(state); 6538 if (ret) 6539 goto fail; 6540 6541 ret = intel_bw_atomic_check(state); 6542 if (ret) 6543 goto fail; 6544 6545 ret = intel_cdclk_atomic_check(state); 6546 if (ret) 6547 goto fail; 6548 6549 if 
(intel_any_crtc_needs_modeset(state)) { 6550 ret = intel_modeset_checks(state); 6551 if (ret) 6552 goto fail; 6553 } 6554 6555 ret = intel_pmdemand_atomic_check(state); 6556 if (ret) 6557 goto fail; 6558 6559 ret = intel_atomic_check_crtcs(state); 6560 if (ret) 6561 goto fail; 6562 6563 ret = intel_fbc_atomic_check(state); 6564 if (ret) 6565 goto fail; 6566 6567 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6568 new_crtc_state, i) { 6569 intel_color_assert_luts(new_crtc_state); 6570 6571 ret = intel_async_flip_check_hw(state, crtc); 6572 if (ret) 6573 goto fail; 6574 6575 /* Either full modeset or fastset (or neither), never both */ 6576 drm_WARN_ON(display->drm, 6577 intel_crtc_needs_modeset(new_crtc_state) && 6578 intel_crtc_needs_fastset(new_crtc_state)); 6579 6580 if (!intel_crtc_needs_modeset(new_crtc_state) && 6581 !intel_crtc_needs_fastset(new_crtc_state)) 6582 continue; 6583 6584 intel_crtc_state_dump(new_crtc_state, state, 6585 intel_crtc_needs_modeset(new_crtc_state) ? 6586 "modeset" : "fastset"); 6587 } 6588 6589 return 0; 6590 6591 fail: 6592 if (ret == -EDEADLK) 6593 return ret; 6594 6595 /* 6596 * FIXME would probably be nice to know which crtc specifically 6597 * caused the failure, in cases where we can pinpoint it. 
6598 */ 6599 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6600 new_crtc_state, i) 6601 intel_crtc_state_dump(new_crtc_state, state, "failed"); 6602 6603 return ret; 6604 } 6605 6606 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 6607 { 6608 int ret; 6609 6610 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base); 6611 if (ret < 0) 6612 return ret; 6613 6614 return 0; 6615 } 6616 6617 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 6618 struct intel_crtc_state *crtc_state) 6619 { 6620 struct intel_display *display = to_intel_display(crtc); 6621 6622 if (DISPLAY_VER(display) != 2 || crtc_state->active_planes) 6623 intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, true); 6624 6625 if (crtc_state->has_pch_encoder) { 6626 enum pipe pch_transcoder = 6627 intel_crtc_pch_transcoder(crtc); 6628 6629 intel_set_pch_fifo_underrun_reporting(display, pch_transcoder, true); 6630 } 6631 } 6632 6633 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, 6634 const struct intel_crtc_state *new_crtc_state) 6635 { 6636 struct intel_display *display = to_intel_display(new_crtc_state); 6637 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 6638 6639 /* 6640 * Update pipe size and adjust fitter if needed: the reason for this is 6641 * that in compute_mode_changes we check the native mode (not the pfit 6642 * mode) to see if we can flip rather than do a full mode set. In the 6643 * fastboot case, we'll flip, but if we don't update the pipesrc and 6644 * pfit state, we'll end up with a big fb scanned out into the wrong 6645 * sized surface. 
6646 */ 6647 intel_set_pipe_src_size(new_crtc_state); 6648 6649 /* on skylake this is done by detaching scalers */ 6650 if (DISPLAY_VER(display) >= 9) { 6651 if (new_crtc_state->pch_pfit.enabled) 6652 skl_pfit_enable(new_crtc_state); 6653 } else if (HAS_PCH_SPLIT(display)) { 6654 if (new_crtc_state->pch_pfit.enabled) 6655 ilk_pfit_enable(new_crtc_state); 6656 else if (old_crtc_state->pch_pfit.enabled) 6657 ilk_pfit_disable(old_crtc_state); 6658 } 6659 6660 /* 6661 * The register is supposedly single buffered so perhaps 6662 * not 100% correct to do this here. But SKL+ calculate 6663 * this based on the adjust pixel rate so pfit changes do 6664 * affect it and so it must be updated for fastsets. 6665 * HSW/BDW only really need this here for fastboot, after 6666 * that the value should not change without a full modeset. 6667 */ 6668 if (DISPLAY_VER(display) >= 9 || 6669 display->platform.broadwell || display->platform.haswell) 6670 hsw_set_linetime_wm(new_crtc_state); 6671 6672 if (new_crtc_state->update_m_n) 6673 intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder, 6674 &new_crtc_state->dp_m_n); 6675 6676 if (new_crtc_state->update_lrr) 6677 intel_set_transcoder_timings_lrr(new_crtc_state); 6678 } 6679 6680 static void commit_pipe_pre_planes(struct intel_atomic_state *state, 6681 struct intel_crtc *crtc) 6682 { 6683 struct intel_display *display = to_intel_display(state); 6684 const struct intel_crtc_state *old_crtc_state = 6685 intel_atomic_get_old_crtc_state(state, crtc); 6686 const struct intel_crtc_state *new_crtc_state = 6687 intel_atomic_get_new_crtc_state(state, crtc); 6688 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 6689 6690 drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq); 6691 6692 /* 6693 * During modesets pipe configuration was programmed as the 6694 * CRTC was enabled. 
6695 */ 6696 if (!modeset) { 6697 if (intel_crtc_needs_color_update(new_crtc_state)) 6698 intel_color_commit_arm(NULL, new_crtc_state); 6699 6700 if (DISPLAY_VER(display) >= 9 || display->platform.broadwell) 6701 bdw_set_pipe_misc(NULL, new_crtc_state); 6702 6703 if (intel_crtc_needs_fastset(new_crtc_state)) 6704 intel_pipe_fastset(old_crtc_state, new_crtc_state); 6705 } 6706 6707 intel_psr2_program_trans_man_trk_ctl(NULL, new_crtc_state); 6708 6709 intel_atomic_update_watermarks(state, crtc); 6710 } 6711 6712 static void commit_pipe_post_planes(struct intel_atomic_state *state, 6713 struct intel_crtc *crtc) 6714 { 6715 struct intel_display *display = to_intel_display(state); 6716 const struct intel_crtc_state *new_crtc_state = 6717 intel_atomic_get_new_crtc_state(state, crtc); 6718 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 6719 6720 drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq); 6721 6722 /* 6723 * Disable the scaler(s) after the plane(s) so that we don't 6724 * get a catastrophic underrun even if the two operations 6725 * end up happening in two different frames. 
 */
	if (DISPLAY_VER(display) >= 9 && !modeset)
		skl_detach_scalers(NULL, new_crtc_state);

	/*
	 * With double buffered LUTs the load can be done immediately
	 * (no DSB involved) when no modeset is pending.
	 */
	if (!modeset &&
	    intel_crtc_needs_color_update(new_crtc_state) &&
	    !intel_color_uses_dsb(new_crtc_state) &&
	    HAS_DOUBLE_BUFFERED_LUT(display))
		intel_color_load_luts(new_crtc_state);

	if (intel_crtc_vrr_enabling(state, crtc))
		intel_vrr_enable(new_crtc_state);
}

/*
 * Bring up a CRTC that is undergoing a full modeset. No-op for
 * fastset/plane-only commits. Timings for all joined pipes are
 * programmed (with VRR off for now) before the platform crtc_enable
 * hook runs, and pipe CRC is re-armed once vblanks work again.
 */
static void intel_enable_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_crtc *pipe_crtc;

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return;

	for_each_intel_crtc_in_pipe_mask_reverse(display->drm, pipe_crtc,
						 intel_crtc_joined_pipe_mask(new_crtc_state)) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		/* VRR will be enabled later, if required */
		intel_crtc_update_active_timings(pipe_crtc_state, false);
	}

	intel_psr_notify_pipe_change(state, crtc, true);

	display->funcs.display->crtc_enable(state, crtc);

	/* vblanks work again, re-enable pipe CRC. */
	intel_crtc_enable_pipe_crc(crtc);
}

/*
 * Pre-commit ("noarm") phase of a per-CRTC update: everything that can
 * be written outside the vblank evasion critical section. Must be
 * followed by intel_update_crtc() for the arming phase.
 */
static void intel_pre_update_crtc(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	if (old_crtc_state->inherited ||
	    intel_crtc_needs_modeset(new_crtc_state)) {
		if (HAS_DPT(display))
			intel_dpt_configure(crtc);
	}

	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    intel_crtc_needs_color_update(new_crtc_state))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (intel_crtc_needs_fastset(new_crtc_state))
			intel_encoders_update_pipe(state, crtc);

		if (DISPLAY_VER(display) >= 11 &&
		    intel_crtc_needs_fastset(new_crtc_state))
			icl_set_pipe_chicken(new_crtc_state);

		if (vrr_params_changed(old_crtc_state, new_crtc_state) ||
		    cmrr_params_changed(old_crtc_state, new_crtc_state))
			intel_vrr_set_transcoder_timings(new_crtc_state);
	}

	if (intel_casf_enabling(new_crtc_state, old_crtc_state))
		intel_casf_enable(new_crtc_state);
	else if (new_crtc_state->hw.casf_params.strength != old_crtc_state->hw.casf_params.strength)
		intel_casf_update_strength(new_crtc_state);

	intel_fbc_update(state, crtc);

	drm_WARN_ON(display->drm, !intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF));

	/*
	 * When the commit goes through a DSB/flip queue the noarm writes
	 * are emitted into the DSB buffer instead (see
	 * intel_atomic_dsb_finish()), hence the !use_dsb/!use_flipq checks.
	 */
	if (!modeset &&
	    intel_crtc_needs_color_update(new_crtc_state) &&
	    !new_crtc_state->use_dsb && !new_crtc_state->use_flipq)
		intel_color_commit_noarm(NULL, new_crtc_state);

	if (!new_crtc_state->use_dsb && !new_crtc_state->use_flipq)
		intel_crtc_planes_update_noarm(NULL, state, crtc);
}

/*
 * Arming phase of a per-CRTC update. Depending on the chosen commit
 * mechanism this either kicks the flip queue, executes the prebuilt DSB,
 * or performs a CPU (MMIO) commit under vblank evasion.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state->use_flipq) {
		intel_flipq_enable(new_crtc_state);

		intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->flipq_event);

		intel_flipq_add(crtc, INTEL_FLIPQ_PLANE_1, 0, INTEL_DSB_0,
				new_crtc_state->dsb_commit);
	} else if (new_crtc_state->use_dsb) {
		intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->dsb_event);

		intel_dsb_commit(new_crtc_state->dsb_commit);
	} else {
		/* Perform vblank evasion around commit operation */
		intel_pipe_update_start(state, crtc);

		/* dsb_commit may still hold e.g. color commands here */
		if (new_crtc_state->dsb_commit)
			intel_dsb_commit(new_crtc_state->dsb_commit);

		commit_pipe_pre_planes(state, crtc);

		intel_crtc_planes_update_arm(NULL, state, crtc);

		commit_pipe_post_planes(state, crtc);

		intel_pipe_update_end(state, crtc);
	}

	/*
	 * VRR/Seamless M/N update may need to update frame timings.
	 *
	 * FIXME Should be synchronized with the start of vblank somehow...
	 */
	if (intel_crtc_vrr_enabling(state, crtc) ||
	    new_crtc_state->update_m_n || new_crtc_state->update_lrr)
		intel_crtc_update_active_timings(new_crtc_state,
						 new_crtc_state->vrr.enable);

	if (new_crtc_state->vrr.dc_balance.enable)
		intel_vrr_dcb_increment_flip_count(new_crtc_state, crtc);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets. But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (intel_crtc_needs_fastset(new_crtc_state) &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}

/*
 * Disable a CRTC (and all pipes joined with it) that was active in the
 * old state. Pipe CRC is torn down first to avoid racing with vblank
 * off, then the platform crtc_disable hook runs, then per-pipe software
 * state (active flag, FBC, watermarks) is updated.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc *pipe_crtc;

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
					 intel_crtc_joined_pipe_mask(old_crtc_state))
		intel_crtc_disable_pipe_crc(pipe_crtc);

	intel_psr_notify_pipe_change(state, crtc, false);

	display->funcs.display->crtc_disable(state, crtc);

	for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
					 intel_crtc_joined_pipe_mask(old_crtc_state)) {
		const struct intel_crtc_state *new_pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		pipe_crtc->active = false;
		intel_fbc_disable(pipe_crtc);

		if (!new_pipe_crtc_state->hw.active)
			intel_initial_watermarks(state, pipe_crtc);
	}
}

/*
 * Disable all CRTCs that need a full modeset, in dependency order:
 * planes first, then port sync / MST slaves, then everything else.
 * Joiner secondaries are handled implicitly via the joined pipe mask
 * of their primary.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u8 disable_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		/*
		 * Needs to be done even for pipes
		 * that weren't enabled previously.
		 */
		intel_pre_plane_update(state, crtc);

		if (!old_crtc_state->hw.active)
			continue;

		disable_pipes |= BIT(crtc->pipe);
	}

	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
		if ((disable_pipes & BIT(crtc->pipe)) == 0)
			continue;

		intel_crtc_disable_planes(state, crtc);

		drm_vblank_work_flush_all(&crtc->base);
	}

	/* Only disable port sync and MST slaves */
	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
		if ((disable_pipes & BIT(crtc->pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(old_crtc_state))
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_old_crtc_state_disables(state, crtc);

		disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
	}

	/* Disable everything else left on */
	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
		if ((disable_pipes & BIT(crtc->pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(old_crtc_state))
			continue;

		intel_old_crtc_state_disables(state, crtc);

		disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
	}

	/* Every pipe flagged above must have been disabled by now */
	drm_WARN_ON(display->drm, disable_pipes);
}

/*
 * Default (non-skl) enable path: enable + pre-update every active CRTC,
 * then arm all of them in a second pass.
 */
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		intel_enable_crtc(state, crtc);

		intel_pre_update_crtc(state, crtc);
	}

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		intel_update_crtc(state, crtc);
	}
}

/*
 * skl+ enable path: like intel_commit_modeset_enables(), but the order
 * of pipe updates must additionally respect the DDB (display buffer)
 * allocations so that they never overlap between two pipes at any
 * point in the sequence.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		intel_pre_update_crtc(state, crtc);
	}

	intel_dbuf_mbus_pre_ddb_update(state);

	/*
	 * Iterate until every fastset pipe has been committed; a pipe is
	 * only committed once its new DDB no longer overlaps the entries
	 * still claimed by not-yet-updated pipes.
	 */
	while (update_pipes) {
		/*
		 * Commit in reverse order to make joiner primary
		 * send the uapi events after secondaries are done.
		 */
		for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
							    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_crtc_wait_for_next_vblank(crtc);
		}
	}

	intel_dbuf_mbus_post_ddb_update(state);

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that need a modeset and do not depend on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(new_crtc_state))
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state))
			continue;

		modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_crtc_is_joiner_secondary(new_crtc_state))
			continue;

		modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		intel_pre_update_crtc(state, crtc);
	}

	/*
	 * Commit in reverse order to make joiner primary
	 * send the uapi events after secondaries are done.
	 */
	for_each_new_intel_crtc_in_state_reverse(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(display->drm,
			    skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	drm_WARN_ON(display->drm, modeset_pipes);
	drm_WARN_ON(display->drm, update_pipes);
}

/*
 * Wait for (and release) all plane fences in the commit. Bails out on
 * the first wait that fails or times out; remaining fences are left for
 * the normal state cleanup.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	long ret;
	int i;

	for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
		if (new_plane_state->fence) {
			ret = dma_fence_wait_timeout(new_plane_state->fence, false,
						     i915_fence_timeout());
			if (ret <= 0)
				break;

			dma_fence_put(new_plane_state->fence);
			new_plane_state->fence = NULL;
		}
	}
}

/* Wait for the commit DSB (and any color commit) to finish executing. */
static void intel_atomic_dsb_wait_commit(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->dsb_commit)
		intel_dsb_wait(crtc_state->dsb_commit);

	intel_color_wait_commit(crtc_state);
}

/* Release the commit DSB and any color commit resources. */
static void intel_atomic_dsb_cleanup(struct intel_crtc_state *crtc_state)
{
	if
 (crtc_state->dsb_commit) {
		intel_dsb_cleanup(crtc_state->dsb_commit);
		crtc_state->dsb_commit = NULL;
	}

	intel_color_cleanup_commit(crtc_state);
}

/*
 * Deferred cleanup for a committed atomic state: runs from
 * display->wq.cleanup (see intel_atomic_commit_tail()) so the
 * committing task isn't stalled on plane/DSB teardown.
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, cleanup_work);
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/* dsb_commit/dsb_color were moved to the old state in commit_tail */
	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
		intel_atomic_dsb_cleanup(old_crtc_state);

	drm_atomic_helper_cleanup_planes(display->drm, &state->base);
	drm_atomic_helper_commit_cleanup_done(&state->base);
	drm_atomic_state_put(&state->base);
}

/*
 * Read the cached fast clear color value out of each RC CCS framebuffer
 * into the plane state, so it can later be programmed to the hardware.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int cc_plane;
		int ret;

		if (!fb)
			continue;

		cc_plane = intel_fb_rc_ccs_cc_plane(fb);
		if (cc_plane < 0)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at
		 * offset 0 of cc plane, plane #2 previous generations or
		 * plane #1 for flat ccs):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		ret = intel_bo_read_from_page(intel_fb_bo(fb),
					      fb->offsets[cc_plane] + 16,
					      &plane_state->ccval,
					      sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(display->drm, ret);
	}
}

/*
 * Decide the commit mechanism for this CRTC (flip queue vs. DSB vs.
 * plain MMIO) and kick off the color commit preparation. Flip queue and
 * DSB are only used for plane-only updates of an active pipe; the
 * legacy cursor fast path always goes the MMIO route.
 */
static void intel_atomic_dsb_prepare(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->hw.active)
		return;

	if (state->base.legacy_cursor_update)
		return;

	/* FIXME deal with everything */
	new_crtc_state->use_flipq =
		intel_flipq_supported(display) &&
		!new_crtc_state->do_async_flip &&
		!new_crtc_state->vrr.enable &&
		!new_crtc_state->has_psr &&
		!intel_crtc_needs_modeset(new_crtc_state) &&
		!intel_crtc_needs_fastset(new_crtc_state) &&
		!intel_crtc_needs_color_update(new_crtc_state);

	new_crtc_state->use_dsb =
		!new_crtc_state->use_flipq &&
		!new_crtc_state->do_async_flip &&
		(DISPLAY_VER(display) >= 20 || !new_crtc_state->has_psr) &&
		!intel_crtc_needs_modeset(new_crtc_state) &&
		!intel_crtc_needs_fastset(new_crtc_state);

	intel_color_prepare_commit(state, crtc);
}

/*
 * Build the per-CRTC commit DSB: record all noarm + arm register writes
 * (plus PSR/VRR/workaround commands) into a DSB command buffer that is
 * later executed by intel_update_crtc(), either directly or via the
 * flip queue.
 */
static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int size = new_crtc_state->plane_color_changed ? 8192 : 1024;

	if (!new_crtc_state->use_flipq &&
	    !new_crtc_state->use_dsb &&
	    !new_crtc_state->dsb_color)
		return;

	/*
	 * Rough estimate:
	 * ~64 registers per each plane * 8 planes = 512
	 * Double that for pipe stuff and other overhead.
	 * ~4913 registers for 3DLUT
	 * ~200 color registers * 3 HDR planes
	 */
	new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0,
						       new_crtc_state->use_dsb ||
						       new_crtc_state->use_flipq ? size : 16);
	if (!new_crtc_state->dsb_commit) {
		/* Allocation failed: fall back to the MMIO commit path */
		new_crtc_state->use_flipq = false;
		new_crtc_state->use_dsb = false;
		intel_color_cleanup_commit(new_crtc_state);
		return;
	}

	if (new_crtc_state->use_flipq || new_crtc_state->use_dsb) {
		/* Wa_18034343758 */
		if (new_crtc_state->use_flipq)
			intel_flipq_wait_dmc_halt(new_crtc_state->dsb_commit, crtc);

		if (new_crtc_state->vrr.dc_balance.enable) {
			/*
			 * Pause the DMC DC balancing for the remainder of
			 * the commit so that vmin/vmax won't change after
			 * we've baked them into the DSB vblank evasion
			 * commands.
			 *
			 * FIXME maybe need a small delay here to make sure
			 * DMC has finished updating the values? Or we need
			 * a better DMC<->driver protocol that gives us real
			 * guarantees about that...
			 */
			intel_pipedmc_dcb_disable(NULL, crtc);
		}

		if (intel_crtc_needs_color_update(new_crtc_state))
			intel_color_commit_noarm(new_crtc_state->dsb_commit,
						 new_crtc_state);
		intel_crtc_planes_update_noarm(new_crtc_state->dsb_commit,
					       state, crtc);

		/*
		 * Ensure we have "Frame Change" event when PSR state is
		 * SRDENT(PSR1) or DEEP_SLEEP(PSR2). Otherwise DSB vblank
		 * evasion hangs as PIPEDSL is reading as 0.
		 */
		intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit,
						     state, crtc);

		intel_psr_wait_for_idle_dsb(new_crtc_state->dsb_commit,
					    new_crtc_state);

		if (new_crtc_state->use_dsb)
			intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);

		/* Arming phase, recorded into the DSB after vblank evasion */
		if (intel_crtc_needs_color_update(new_crtc_state))
			intel_color_commit_arm(new_crtc_state->dsb_commit,
					       new_crtc_state);
		bdw_set_pipe_misc(new_crtc_state->dsb_commit,
				  new_crtc_state);
		intel_psr2_program_trans_man_trk_ctl(new_crtc_state->dsb_commit,
						     new_crtc_state);
		intel_crtc_planes_update_arm(new_crtc_state->dsb_commit,
					     state, crtc);

		if (DISPLAY_VER(display) >= 9)
			skl_detach_scalers(new_crtc_state->dsb_commit,
					   new_crtc_state);

		/* Wa_18034343758 */
		if (new_crtc_state->use_flipq)
			intel_flipq_unhalt_dmc(new_crtc_state->dsb_commit, crtc);
	}

	/* Hook up the separate color DSB, if the color code built one */
	if (intel_color_uses_chained_dsb(new_crtc_state))
		intel_dsb_chain(state, new_crtc_state->dsb_commit,
				new_crtc_state->dsb_color, true);
	else if (intel_color_uses_gosub_dsb(new_crtc_state))
		intel_dsb_gosub(new_crtc_state->dsb_commit,
				new_crtc_state->dsb_color);

	if (new_crtc_state->use_dsb && !intel_color_uses_chained_dsb(new_crtc_state)) {
		intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);

		intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
		intel_dsb_wait_for_delayed_vblank(state, new_crtc_state->dsb_commit);
		intel_vrr_check_push_sent(new_crtc_state->dsb_commit,
					  new_crtc_state);

		if (new_crtc_state->vrr.dc_balance.enable)
			intel_pipedmc_dcb_enable(new_crtc_state->dsb_commit, crtc);

		intel_dsb_interrupt(new_crtc_state->dsb_commit);
	}

	intel_dsb_finish(new_crtc_state->dsb_commit);
}

/*
 * The main atomic commit sequence: prepares the DSBs, waits for
 * dependencies, disables outgoing CRTCs, enables/updates incoming ones,
 * and finally signals completion and queues deferred cleanup. Runs
 * either inline (blocking commits) or from a commit worker.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_uncore *uncore = to_intel_uncore(display->drm);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
	struct ref_tracker *wakeref = NULL;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_atomic_dsb_prepare(state, crtc);

	intel_atomic_commit_fence_wait(state);

	intel_td_flush(display);

	intel_atomic_prepare_plane_clear_colors(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_fbc_prepare_dirty_rect(state, crtc);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_atomic_dsb_finish(state, crtc);

	drm_atomic_helper_wait_for_dependencies(&state->base);
	drm_dp_mst_atomic_wait_for_dependencies(&state->base);
	intel_atomic_global_state_wait_for_dependencies(state);

	/*
	 * During full modesets we write a lot of registers, wait
	 * for PLLs, etc. Doing that while DC states are enabled
	 * is not a good idea.
	 *
	 * During fastsets and other updates we also need to
	 * disable DC states due to the following scenario:
	 * 1. DC5 exit and PSR exit happen
	 * 2. Some or all _noarm() registers are written
	 * 3. Due to some long delay PSR is re-entered
	 * 4. DC5 entry -> DMC saves the already written new
	 *    _noarm() registers and the old not yet written
	 *    _arm() registers
	 * 5. DC5 exit -> DMC restores a mixture of old and
	 *    new register values and arms the update
	 * 6. PSR exit -> hardware latches a mixture of old and
	 *    new register values -> corrupted frame, or worse
	 * 7. New _arm() registers are finally written
	 * 8. Hardware finally latches a complete set of new
	 *    register values, and subsequent frames will be OK again
	 *
	 * Also note that due to the pipe CSC hardware issues on
	 * SKL/GLK DC states must remain off until the pipe CSC
	 * state readout has happened. Otherwise we risk corrupting
	 * the CSC latched register values with the readout (see
	 * skl_read_csc() and skl_color_commit_noarm()).
	 */
	wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    intel_crtc_needs_fastset(new_crtc_state))
			intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
	}

	intel_commit_modeset_disables(state);

	intel_dp_tunnel_atomic_alloc_bw(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	/*
	 * In XE_LPD+ Pmdemand combines many parameters such as voltage index,
	 * plls, cdclk frequency, QGV point selection parameter etc. Voltage
	 * index, cdclk/ddiclk frequencies are supposed to be configured before
	 * the cdclk config is set.
	 */
	intel_pmdemand_pre_plane_update(state);

	if (state->modeset)
		drm_atomic_helper_update_legacy_modeset_state(display->drm, &state->base);

	intel_set_cdclk_pre_plane_update(state);

	if (state->modeset)
		intel_modeset_verify_disabled(state);

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now-disabled pipes here.
		 */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&display->drm->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&display->drm->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	display->funcs.display->commit_modeset_enables(state);

	/* FIXME probably need to sequence this properly */
	intel_program_dpkgc_latency(state);

	intel_wait_for_vblank_workers(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously_
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(display->drm, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_disable_flip_done(state, crtc);

		intel_atomic_dsb_wait_commit(new_crtc_state);

		if (!state->base.legacy_cursor_update && !new_crtc_state->use_dsb)
			intel_vrr_check_push_sent(NULL, new_crtc_state);

		if (new_crtc_state->use_flipq)
			intel_flipq_disable(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (DISPLAY_VER(display) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, true);

		intel_optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(state, crtc);

		intel_post_plane_update_after_readout(state, crtc);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 *
		 * FIXME get rid of this funny new->old swapping
		 */
		old_crtc_state->dsb_color = fetch_and_zero(&new_crtc_state->dsb_color);
		old_crtc_state->dsb_commit = fetch_and_zero(&new_crtc_state->dsb_commit);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(display);
	intel_check_pch_fifo_underruns(display);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);
	intel_set_cdclk_post_plane_update(state);
	intel_pmdemand_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);
	intel_atomic_global_state_commit_done(state);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(uncore);
	}
	/*
	 * Delay re-enabling DC states by 17 ms to avoid the off->on->off
	 * toggling overhead at and above 60 FPS.
	 */
	intel_display_power_put_async_delay(display, POWER_DOMAIN_DC_OFF, wakeref, 17);
	intel_display_rpm_put(display, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
7638 */ 7639 INIT_WORK(&state->cleanup_work, intel_atomic_cleanup_work); 7640 queue_work(display->wq.cleanup, &state->cleanup_work); 7641 } 7642 7643 static void intel_atomic_commit_work(struct work_struct *work) 7644 { 7645 struct intel_atomic_state *state = 7646 container_of(work, struct intel_atomic_state, base.commit_work); 7647 7648 intel_atomic_commit_tail(state); 7649 } 7650 7651 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 7652 { 7653 struct intel_plane_state *old_plane_state, *new_plane_state; 7654 struct intel_plane *plane; 7655 int i; 7656 7657 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 7658 new_plane_state, i) 7659 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 7660 to_intel_frontbuffer(new_plane_state->hw.fb), 7661 plane->frontbuffer_bit); 7662 } 7663 7664 static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock) 7665 { 7666 int ret; 7667 7668 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 7669 if (ret) 7670 return ret; 7671 7672 ret = intel_atomic_global_state_setup_commit(state); 7673 if (ret) 7674 return ret; 7675 7676 return 0; 7677 } 7678 7679 static int intel_atomic_swap_state(struct intel_atomic_state *state) 7680 { 7681 int ret; 7682 7683 ret = drm_atomic_helper_swap_state(&state->base, true); 7684 if (ret) 7685 return ret; 7686 7687 intel_atomic_swap_global_state(state); 7688 7689 intel_dpll_swap_state(state); 7690 7691 intel_atomic_track_fbs(state); 7692 7693 return 0; 7694 } 7695 7696 int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, 7697 bool nonblock) 7698 { 7699 struct intel_display *display = to_intel_display(dev); 7700 struct intel_atomic_state *state = to_intel_atomic_state(_state); 7701 int ret = 0; 7702 7703 state->wakeref = intel_display_rpm_get(display); 7704 7705 /* 7706 * The intel_legacy_cursor_update() fast path takes care 7707 * of avoiding the vblank waits for simple cursor 7708 * 
movement and flips. For cursor on/off and size changes, 7709 * we want to perform the vblank waits so that watermark 7710 * updates happen during the correct frames. Gen9+ have 7711 * double buffered watermarks and so shouldn't need this. 7712 * 7713 * Unset state->legacy_cursor_update before the call to 7714 * drm_atomic_helper_setup_commit() because otherwise 7715 * drm_atomic_helper_wait_for_flip_done() is a noop and 7716 * we get FIFO underruns because we didn't wait 7717 * for vblank. 7718 * 7719 * FIXME doing watermarks and fb cleanup from a vblank worker 7720 * (assuming we had any) would solve these problems. 7721 */ 7722 if (DISPLAY_VER(display) < 9 && state->base.legacy_cursor_update) { 7723 struct intel_crtc_state *new_crtc_state; 7724 struct intel_crtc *crtc; 7725 int i; 7726 7727 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7728 if (new_crtc_state->wm.need_postvbl_update || 7729 new_crtc_state->update_wm_post) 7730 state->base.legacy_cursor_update = false; 7731 } 7732 7733 ret = intel_atomic_prepare_commit(state); 7734 if (ret) { 7735 drm_dbg_atomic(display->drm, 7736 "Preparing state failed with %i\n", ret); 7737 intel_display_rpm_put(display, state->wakeref); 7738 return ret; 7739 } 7740 7741 ret = intel_atomic_setup_commit(state, nonblock); 7742 if (!ret) 7743 ret = intel_atomic_swap_state(state); 7744 7745 if (ret) { 7746 drm_atomic_helper_unprepare_planes(dev, &state->base); 7747 intel_display_rpm_put(display, state->wakeref); 7748 return ret; 7749 } 7750 7751 drm_atomic_state_get(&state->base); 7752 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 7753 7754 if (nonblock && state->modeset) { 7755 queue_work(display->wq.modeset, &state->base.commit_work); 7756 } else if (nonblock) { 7757 queue_work(display->wq.flip, &state->base.commit_work); 7758 } else { 7759 if (state->modeset) 7760 flush_workqueue(display->wq.modeset); 7761 intel_atomic_commit_tail(state); 7762 } 7763 7764 return 0; 7765 } 7766 7767 static u32 
intel_encoder_possible_clones(struct intel_encoder *encoder)
{
	/*
	 * Build the drm possible_clones bitmask for @encoder: the mask of
	 * all encoders (including itself) that encoders_cloneable() says
	 * may be active on the same CRTC at the same time.
	 */
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *source_encoder;
	u32 possible_clones = 0;

	for_each_intel_encoder(display->drm, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			possible_clones |= drm_encoder_mask(&source_encoder->base);
	}

	return possible_clones;
}

/*
 * Build the drm possible_crtcs bitmask for @encoder from the encoder's
 * pipe_mask: one bit per CRTC whose pipe the encoder can be driven by.
 */
static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_crtc *crtc;
	u32 possible_crtcs = 0;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, encoder->pipe_mask)
		possible_crtcs |= drm_crtc_mask(&crtc->base);

	return possible_crtcs;
}

/*
 * Is eDP on port A present? Requires a mobile platform, the DP_A
 * detect strap set, and (on Ironlake) the eDP-A fuse not blown.
 */
static bool ilk_has_edp_a(struct intel_display *display)
{
	if (!display->platform.mobile)
		return false;

	if ((intel_de_read(display, DP_A) & DP_DETECTED) == 0)
		return false;

	if (display->platform.ironlake && (intel_de_read(display, FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/*
 * Is the CRT output (driven via DDI E) present on this DDI platform?
 * Checked via display version, ULT SKU, fuse straps, the DDI A lane
 * configuration, and VBT's integrated CRT support flag.
 */
static bool intel_ddi_crt_present(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 9)
		return false;

	if (display->platform.haswell_ult || display->platform.broadwell_ult)
		return false;

	if (HAS_PCH_LPT_H(display) &&
	    intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!display->vbt.int_crt_support)
		return false;

	return true;
}

/*
 * WARN and return false if @port is not part of the platform's
 * runtime port mask; returns true for a valid port.
 */
bool assert_port_valid(struct intel_display *display, enum port port)
{
	return !drm_WARN(display->drm, !(DISPLAY_RUNTIME_INFO(display)->port_mask & BIT(port)),
			 "Platform does not support port %c\n", port_name(port));
}

/*
 * Probe the hardware straps (and VBT, where the straps are unreliable)
 * and register the output encoders present on this platform, then fill
 * in each encoder's possible_crtcs/possible_clones masks.
 *
 * The per-platform branches below register outputs in a deliberate
 * order (e.g. LVDS before CRT on PCH-split, see comment there), so the
 * ordering must not be changed casually.
 */
void intel_setup_outputs(struct intel_display *display)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(display);

	if (!HAS_DISPLAY(display))
		return;

	if (HAS_DDI(display)) {
		if (intel_ddi_crt_present(display))
			intel_crt_init(display);

		intel_bios_for_each_encoder(display, intel_ddi_init);

		if (display->platform.geminilake || display->platform.broxton)
			vlv_dsi_init(display);
	} else if (HAS_PCH_SPLIT(display)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(display);
		intel_crt_init(display);

		dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);

		if (ilk_has_edp_a(display))
			g4x_dp_init(display, DP_A, PORT_A);

		if (intel_de_read(display, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(display, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(display, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(display, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(display, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(display, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(display, PCH_HDMIC, PORT_C);

		/* Port D is HDMI only when it is not an eDP port. */
		if (!dpd_is_edp && intel_de_read(display, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(display, PCH_HDMID, PORT_D);

		if (intel_de_read(display, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(display, PCH_DP_C, PORT_C);

		if (intel_de_read(display, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(display, PCH_DP_D, PORT_D);
	} else if (display->platform.valleyview || display->platform.cherryview) {
		bool has_edp, has_port;

		if (display->platform.valleyview && display->vbt.int_crt_support)
			intel_crt_init(display);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(display, PORT_B);
		has_port = intel_bios_is_port_present(display, PORT_B);
		if (intel_de_read(display, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(display, VLV_DP_B, PORT_B);
		if ((intel_de_read(display, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(display, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(display, PORT_C);
		has_port = intel_bios_is_port_present(display, PORT_C);
		if (intel_de_read(display, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(display, VLV_DP_C, PORT_C);
		if ((intel_de_read(display, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(display, VLV_HDMIC, PORT_C);

		if (display->platform.cherryview) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(display, PORT_D);
			if (intel_de_read(display, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(display, CHV_DP_D, PORT_D);
			if (intel_de_read(display, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(display, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(display);
	} else if (display->platform.pineview) {
		intel_lvds_init(display);
		intel_crt_init(display);
	} else if (IS_DISPLAY_VER(display, 3, 4)) {
		bool found = false;

		if (display->platform.mobile)
			intel_lvds_init(display);

		intel_crt_init(display);

		if (intel_de_read(display, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(display->drm, "probing SDVOB\n");
			found = intel_sdvo_init(display, GEN3_SDVOB, PORT_B);
			if (!found && display->platform.g4x) {
				drm_dbg_kms(display->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(display, GEN4_HDMIB, PORT_B);
			}

			if (!found && display->platform.g4x)
				g4x_dp_init(display, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		/* NOTE: the SDVOB detect bit is deliberately used for SDVOC here. */
		if (intel_de_read(display, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(display->drm, "probing SDVOC\n");
			found = intel_sdvo_init(display, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(display, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (display->platform.g4x) {
				drm_dbg_kms(display->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(display, GEN4_HDMIC, PORT_C);
			}
			if (display->platform.g4x)
				g4x_dp_init(display, DP_C, PORT_C);
		}

		if (display->platform.g4x && (intel_de_read(display, DP_D) & DP_DETECTED))
			g4x_dp_init(display, DP_D, PORT_D);

		if (SUPPORTS_TV(display))
			intel_tv_init(display);
	} else if (DISPLAY_VER(display) == 2) {
		if (display->platform.i85x)
			intel_lvds_init(display);

		intel_crt_init(display);
		intel_dvo_init(display);
	}

	/* Now that all encoders exist, fill in their CRTC/clone masks. */
	for_each_intel_encoder(display->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(display);

	drm_helper_move_panel_connectors_to_head(display->drm);
}

/*
 * Max dotclock the platform can handle, accounting for pipe joining:
 * ultrajoiner quadruples and (uncompressed/big) joiner doubles the
 * single-pipe limit.
 */
static int max_dotclock(struct intel_display *display)
{
	int max_dotclock = display->cdclk.max_dotclk_freq;

	if (HAS_ULTRAJOINER(display))
		max_dotclock *= 4;
	else if (HAS_UNCOMPRESSED_JOINER(display) || HAS_BIGJOINER(display))
		max_dotclock *= 2;

	return max_dotclock;
}

/*
 * Device-wide mode validation: reject mode flags the hardware can't do,
 * excessive dotclocks, and timings beyond the per-generation transcoder
 * limits.
 */
enum drm_mode_status intel_mode_valid(struct drm_device *dev,
				      const struct drm_display_mode *mode)
{
	struct intel_display *display = to_intel_display(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/*
	 * Reject clearly excessive dotclocks early to
	 * avoid having to worry about huge integers later.
	 */
	if (mode->clock > max_dotclock(display))
		return MODE_CLOCK_HIGH;

	/* Transcoder timing limits */
	if (DISPLAY_VER(display) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(display) >= 9 ||
		   display->platform.broadwell || display->platform.haswell) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(display) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	/*
	 * WM_LINETIME only goes up to (almost) 64 usec, and also
	 * knowing that the linetime is always bounded will ease the
	 * mind during various calculations.
	 */
	if (DIV_ROUND_UP(mode->htotal * 1000, mode->clock) > 64)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

/*
 * Extra timing limits for modes that go through a CPU transcoder
 * (minimum horizontal/vertical blanking, hsync front porch).
 */
enum drm_mode_status intel_cpu_transcoder_mode_valid(struct intel_display *display,
						     const struct drm_display_mode *mode)
{
	/*
	 * Additional transcoder timing limits,
	 * excluding BXT/GLK DSI transcoders.
	 */
	if (DISPLAY_VER(display) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(display) >= 5 || display->platform.g4x) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

/*
 * Reject modes larger than the maximum plane size (scaled by the number
 * of joined pipes on newer platforms), so a fullscreen plane is always
 * possible for any advertised mode.
 */
enum drm_mode_status
intel_mode_valid_max_plane_size(struct intel_display *display,
				const struct drm_display_mode *mode,
				int num_joined_pipes)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (DISPLAY_VER(display) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertize modes that are
	 * too big for that.
	 */
	if (DISPLAY_VER(display) >= 30) {
		plane_width_max = 6144 * num_joined_pipes;
		plane_height_max = 4800;
	} else if (DISPLAY_VER(display) >= 11) {
		plane_width_max = 5120 * num_joined_pipes;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

/* Display version 9+ (SKL and later). */
static const struct intel_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
	.fixup_initial_plane_config = skl_fixup_initial_plane_config,
};

/* Pre-gen9 DDI platforms (HSW/BDW). */
static const struct intel_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/* PCH-split platforms (ILK/SNB/IVB). */
static const struct intel_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/* VLV/CHV. */
static const struct intel_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/* Everything older. */
static const struct intel_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @display: display device private
 */
void intel_init_display_hooks(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 9) {
		display->funcs.display = &skl_display_funcs;
	} else if (HAS_DDI(display)) {
		display->funcs.display = &ddi_display_funcs;
	} else if (HAS_PCH_SPLIT(display)) {
		display->funcs.display = &pch_split_display_funcs;
	} else if (display->platform.cherryview ||
		   display->platform.valleyview) {
		display->funcs.display = &vlv_display_funcs;
	} else {
		display->funcs.display = &i9xx_display_funcs;
	}
}

/*
 * Commit an initial atomic state covering all CRTCs, retrying on
 * -EDEADLK via the standard modeset backoff dance. For active CRTCs
 * this pulls in their planes (and connectors whose encoders fail
 * initial_fastset_check()) so the boot state gets cleaned up.
 */
int intel_initial_commit(struct intel_display *display)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(display->drm);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	for_each_intel_crtc(display->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		/* Inactive CRTCs no longer carry the inherited boot state. */
		if (!crtc_state->hw.active)
			crtc_state->inherited = false;

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(display->drm, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					/*
					 * Encoder needs a full modeset; pull in
					 * its connectors so the commit does one.
					 */
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		/* Standard deadlock backoff: drop locks and retry. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

/*
 * Force-enable @pipe with a fixed 640x480@60 timing ("force quirk",
 * see the debug message). The DPLL programming sequence below is
 * order-dependent: VGA mode must be enabled before the P1/P2 divider
 * write, and the DPLL value is rewritten several times with posting
 * reads and delays in between — do not reorder.
 */
void i830_enable_pipe(struct intel_display *display, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	enum transcoder cpu_transcoder = (enum transcoder)pipe;
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check that the fixed dividers produce the expected dotclock. */
	drm_WARN_ON(display->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(display->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder),
		       HACTIVE(640 - 1) | HTOTAL(800 - 1));
	intel_de_write(display, TRANS_HBLANK(display, cpu_transcoder),
		       HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
	intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder),
		       HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
	intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
		       VACTIVE(480 - 1) | VTOTAL(525 - 1));
	intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
		       VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
	intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder),
		       VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
	intel_de_write(display, PIPESRC(display, pipe),
		       PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));

	intel_de_write(display, FP0(pipe), fp);
	intel_de_write(display, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(display, DPLL(display, pipe),
		       dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(display, DPLL(display, pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(display, DPLL(display, pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(display, DPLL(display, pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(display, DPLL(display, pipe), dpll);
		intel_de_posting_read(display, DPLL(display, pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(display, TRANSCONF(display, pipe), TRANSCONF_ENABLE);
	intel_de_posting_read(display, TRANSCONF(display, pipe));

	/* Confirm the pipe actually started scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}

/*
 * Counterpart of i830_enable_pipe(): disable a pipe that was
 * force-enabled by the quirk. All planes and cursors are expected to
 * be off already (WARNed on below) before the transcoder and DPLL are
 * shut down.
 */
void i830_disable_pipe(struct intel_display *display, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

	drm_dbg_kms(display->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(display->drm,
		    intel_de_read(display, DSPCNTR(display, PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, DSPCNTR(display, PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, DSPCNTR(display, PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, CURCNTR(display, PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, CURCNTR(display, PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(display, TRANSCONF(display, pipe), 0);
	intel_de_posting_read(display, TRANSCONF(display, pipe));

	/* Wait for scanout to stop before turning off the DPLL. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(display, DPLL(display, pipe));
}

/*
 * Scanout needs the VT-d workaround on display version 6-11 when
 * VT-d is active.
 */
bool intel_scanout_needs_vtd_wa(struct intel_display *display)
{
	return IS_DISPLAY_VER(display, 6, 11) && intel_display_vtd_active(display);
}