/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dma-resv.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
"intel_plane_initial.h" 106 #include "intel_pmdemand.h" 107 #include "intel_pps.h" 108 #include "intel_psr.h" 109 #include "intel_psr_regs.h" 110 #include "intel_sdvo.h" 111 #include "intel_snps_phy.h" 112 #include "intel_tc.h" 113 #include "intel_tv.h" 114 #include "intel_vblank.h" 115 #include "intel_vdsc.h" 116 #include "intel_vdsc_regs.h" 117 #include "intel_vga.h" 118 #include "intel_vrr.h" 119 #include "intel_wm.h" 120 #include "skl_scaler.h" 121 #include "skl_universal_plane.h" 122 #include "skl_watermark.h" 123 #include "vlv_dsi.h" 124 #include "vlv_dsi_pll.h" 125 #include "vlv_dsi_regs.h" 126 #include "vlv_sideband.h" 127 128 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); 129 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); 130 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); 131 static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state); 132 133 /* returns HPLL frequency in kHz */ 134 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) 135 { 136 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 137 138 /* Obtain SKU information */ 139 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 140 CCK_FUSE_HPLL_FREQ_MASK; 141 142 return vco_freq[hpll_freq] * 1000; 143 } 144 145 int vlv_get_cck_clock(struct drm_i915_private *dev_priv, 146 const char *name, u32 reg, int ref_freq) 147 { 148 u32 val; 149 int divider; 150 151 val = vlv_cck_read(dev_priv, reg); 152 divider = val & CCK_FREQUENCY_VALUES; 153 154 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) != 155 (divider << CCK_FREQUENCY_STATUS_SHIFT), 156 "%s change in progress\n", name); 157 158 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); 159 } 160 161 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 162 const char *name, u32 reg) 163 { 164 int hpll; 165 166 vlv_cck_get(dev_priv); 167 168 if (dev_priv->hpll_freq == 0) 169 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv); 170 171 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq); 172 173 vlv_cck_put(dev_priv); 174 175 return hpll; 176 } 177 178 void intel_update_czclk(struct drm_i915_private *dev_priv) 179 { 180 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) 181 return; 182 183 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", 184 CCK_CZ_CLOCK_CONTROL); 185 186 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n", 187 dev_priv->czclk_freq); 188 } 189 190 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state) 191 { 192 return (crtc_state->active_planes & 193 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0; 194 } 195 196 /* WA Display #0827: Gen9:all */ 197 static void 198 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) 199 { 200 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 201 DUPS1_GATING_DIS | DUPS2_GATING_DIS, 202 enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0); 203 } 204 205 /* Wa_2006604312:icl,ehl */ 206 static void 207 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 208 bool enable) 209 { 210 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 211 DPFR_GATING_DIS, 212 enable ? DPFR_GATING_DIS : 0); 213 } 214 215 /* Wa_1604331009:icl,jsl,ehl */ 216 static void 217 icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 218 bool enable) 219 { 220 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 221 CURSOR_GATING_DIS, 222 enable ? 
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->bigjoiner_pipes) - 1;
}

u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_pipes)
		return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe != bigjoiner_master_pipe(crtc_state);
}

bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe == bigjoiner_master_pipe(crtc_state);
}

static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->bigjoiner_pipes);
}

struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder),
					    TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));

		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(dev_priv, cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder), str_on_off(state),
			str_on_off(cur_state));
}
static void assert_plane(struct intel_plane *plane, bool state)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(i915, cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
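/*
 * Enable the transcoder/pipe for the given CRTC state. All planes are
 * expected to be disabled at this point, and on GMCH platforms the
 * relevant (DSI or pipe) PLL must already be running.
 */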
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
		       val | TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~TRANSCONF_ENABLE;

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int plane_size;

		if (rem_info->plane[i].linear)
			plane_size = rem_info->plane[i].size;
		else
			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;

		if (plane_size == 0)
			continue;

		if (rem_info->plane_alignment)
			size = ALIGN(size, rem_info->plane_alignment);

		size += plane_size;
	}

	return size;
}

bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc &&
		 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
 */
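/*
 * Example (hypothetical numbers): with a 4 bytes-per-pixel format
 * (cpp = 4) and a mapping stride of 16384 bytes, (x, y) = (2, 3) maps
 * to 3 * 16384 + 2 * 4 = 49160 bytes into the mapping.
 */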
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;

	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all,
	 * but if pipe A is disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}
void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}

unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}
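/*
 * PIPE_CHICKEN holds a collection of per-pipe workaround bits; this
 * helper applies the ones that must be in place whenever the pipe is
 * enabled. See the per-bit comments for the specific WAs.
 */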
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2(dev_priv))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *master_crtc;
	int num_encoders = 0;
	int i;

	master_crtc = intel_master_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &master_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(state->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(master_crtc->pipe));

	return encoder;
}

static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
			  PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
			  PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(dev_priv) == 9)
		return true;

	return false;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N strech max must be programmed to 11b (x1)
		 *  when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}

static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}

static void intel_encoders_audio_enable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_enable)
			encoder->audio_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_audio_disable(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_disable)
			encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
	}
}
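/*
 * A feature counts as enabling if it is active in the new state and was
 * either inactive in the old state or the CRTC undergoes a full modeset
 * (and vice versa for disabling), i.e. a modeset is treated as a full
 * off/on cycle for the feature.
 */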
#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))

static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline ||
		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
}

static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
			 const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
			  const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
			   const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

#undef is_disabling
#undef is_enabling
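/*
 * Post-plane-update bookkeeping: flush frontbuffer state, update
 * watermarks, and drop the workarounds that the new plane configuration
 * no longer needs (the mirror image of intel_pre_plane_update()).
 */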
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_post_plane_update(state, crtc);

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);

	if (audio_enabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_enable(state, crtc);
}

static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}
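/*
 * WA for planes whose async flip bit is double buffered and only
 * latched at vblank start: re-arm such planes with the async flip bit
 * cleared, then wait for a vblank so the disable actually takes effect.
 */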
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
				       ~new_crtc_state->async_flip_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    disable_async_flip_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}

static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (vrr_disabling(old_crtc_state, new_crtc_state)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	if (audio_disabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_disable(state, crtc);

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}
	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}
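/*
 * Most of the intel_encoders_*() helpers below follow the same pattern:
 * walk the connectors in the atomic state, skip those not feeding this
 * CRTC, and invoke the corresponding optional encoder hook.
 */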
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports
	 * after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (i915->display.dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}
}

static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
	}
}
static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
	}
}

static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);

	plane->disable_arm(plane, crtc_state);
}

static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}

static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	ilk_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		ilk_pch_pre_enable(state, crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		intel_wait_for_pipe_scanline_moving(crtc);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
					    enum pipe pipe, bool apply)
{
	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;

	if (apply)
		val |= mask;
	else
		val &= ~mask;

	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
}

static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
		       HSW_LINETIME(crtc_state->linetime) |
		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
}

static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder),
		     HSW_FRAME_START_DELAY_MASK,
		     HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
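/*
 * With bigjoiner the encoder hooks are invoked against the master CRTC
 * only; a slave pipe triggers them here on behalf of its master (see
 * the "steps 1-7" note below).
 */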
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);

	/*
	 * Enable sequence steps 1-7 on bigjoiner master
	 */
	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_pll_enable(state, master_crtc);

	if (crtc_state->shared_dpll)
		intel_enable_shared_dpll(crtc_state);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_enable(state, master_crtc);
}

static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);
	if (HAS_VRR(dev_priv))
		intel_vrr_set_transcoder_timings(crtc_state);

	if (cpu_transcoder != TRANSCODER_EDP)
		intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder),
			       crtc_state->pixel_multiplier - 1);

	hsw_set_frame_start_delay(crtc_state);

	hsw_set_transconf(crtc_state);
}

static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	intel_dmc_enable_pipe(dev_priv, crtc->pipe);

	if (!new_crtc_state->bigjoiner_pipes) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_dsc_enable(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 13)
		intel_uncompressed_joiner_enable(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipe_misc(new_crtc_state);

	if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
	    !transcoder_is_dsi(cpu_transcoder))
		hsw_configure_cpu_transcoder(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);

	intel_initial_watermarks(state, crtc);

	if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (psl_clkgate_wa) {
		intel_crtc_wait_for_next_vblank(crtc);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		struct intel_crtc *wa_crtc;

		wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);

		intel_crtc_wait_for_next_vblank(wa_crtc);
		intel_crtc_wait_for_next_vblank(wa_crtc);
	}
}

void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
}

static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_disable(state, crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_post_disable(state, crtc);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_disable_shared_dpll(old_crtc_state);
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}

bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (phy == PHY_NONE)
		return false;
	else if (IS_ALDERLAKE_S(dev_priv))
		return phy <= PHY_E;
	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
		return phy <= PHY_D;
	else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
		return phy <= PHY_C;
	else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
		return phy <= PHY_B;
	else
		/*
		 * DG2 outputs labelled as "combo PHY" in the bspec use
		 * SNPS PHYs with completely different programming,
		 * hence we always return false here.
		 */
		return false;
}

bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
	/*
	 * DG2's "TC1", although a TC-capable output, doesn't share the same
	 * flow as other platforms on the display engine side and instead
	 * relies on the SNPS PHY, which is programmed separately.
	 */
	if (IS_DG2(dev_priv))
		return false;

	if (DISPLAY_VER(dev_priv) >= 13)
		return phy >= PHY_F && phy <= PHY_I;
	else if (IS_TIGERLAKE(dev_priv))
		return phy >= PHY_D && phy <= PHY_I;
	else if (IS_ICELAKE(dev_priv))
		return phy >= PHY_C && phy <= PHY_F;

	return false;
}

bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
{
	/*
	 * For DG2, and for DG2 only, all four "combo" ports and the TC1 port
	 * (PHY E) use Synopsys PHYs. See intel_phy_is_tc().
	 */
	return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
}
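/*
 * Map a DDI port to its PHY instance. The mapping is not 1:1 on every
 * platform: e.g. on JSL/EHL port D is muxed onto PHY A, and the TC port
 * block starts at a platform specific PHY (F on display ver 13+, B on
 * ADL-S, C on DG1/RKL).
 */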
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
	if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
		return PHY_D + port - PORT_D_XELPD;
	else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
		return PHY_F + port - PORT_TC1;
	else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
		return PHY_B + port - PORT_TC1;
	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
		return PHY_C + port - PORT_TC1;
	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
		 port == PORT_D)
		return PHY_A;

	return PHY_A + port - PORT_A;
}

enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
		return TC_PORT_NONE;

	if (DISPLAY_VER(dev_priv) >= 12)
		return TC_PORT_1 + port - PORT_TC1;
	else
		return TC_PORT_1 + port - PORT_C;
}

enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (intel_tc_port_in_tbt_alt_mode(dig_port))
		return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);

	return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}

static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
				   struct intel_power_domain_mask *mask)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;

	bitmap_zero(mask->bits, POWER_DOMAIN_NUM);

	if (!crtc_state->hw.active)
		return;

	set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
	set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);

	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		set_bit(intel_encoder->power_domain, mask->bits);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);

	if (crtc_state->shared_dpll)
		set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);

	if (crtc_state->dsc.compression_enable)
		set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}

void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
					  struct intel_power_domain_mask *old_domains)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	struct intel_power_domain_mask domains, new_domains;

	get_crtc_power_domains(crtc_state, &domains);

	bitmap_andnot(new_domains.bits,
		      domains.bits,
		      crtc->enabled_power_domains.mask.bits,
		      POWER_DOMAIN_NUM);
	bitmap_andnot(old_domains->bits,
		      crtc->enabled_power_domains.mask.bits,
		      domains.bits,
		      POWER_DOMAIN_NUM);

	for_each_power_domain(domain, &new_domains)
		intel_display_power_get_in_set(dev_priv,
					       &crtc->enabled_power_domains,
					       domain);
}
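/*
 * intel_modeset_get_crtc_power_domains() above grabs references for all
 * domains the new state needs but doesn't hold yet, and returns the set
 * of currently held domains that are no longer needed in @old_domains;
 * the caller is expected to drop those via
 * intel_modeset_put_crtc_power_domains() once the update is committed.
 */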
void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
					  struct intel_power_domain_mask *domains)
{
	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
					    &crtc->enabled_power_domains,
					    domains);
}

static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	i9xx_set_pipeconf(crtc_state);
}

static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv))
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}

static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (!intel_initial_watermarks(state, crtc))
		intel_update_watermarks(dev_priv);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);
}
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}

static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.funcs.wm->initial_watermarks)
		intel_update_watermarks(dev_priv);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}

void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return DISPLAY_VER(dev_priv) < 4 &&
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}

static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
	struct drm_rect src;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (!crtc_state->pch_pfit.enabled)
		return pixel_rate;

	drm_rect_init(&src, 0, 0,
		      drm_rect_width(&crtc_state->pipe_src) << 16,
		      drm_rect_height(&crtc_state->pipe_src) << 16);

	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
				   pixel_rate);
}
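/*
 * Copy the hardware (crtc_*) timings of @timings into the user-visible
 * timing fields of @mode, and regenerate the mode name to match.
 */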
static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
					 const struct drm_display_mode *timings)
{
	mode->hdisplay = timings->crtc_hdisplay;
	mode->htotal = timings->crtc_htotal;
	mode->hsync_start = timings->crtc_hsync_start;
	mode->hsync_end = timings->crtc_hsync_end;

	mode->vdisplay = timings->crtc_vdisplay;
	mode->vtotal = timings->crtc_vtotal;
	mode->vsync_start = timings->crtc_vsync_start;
	mode->vsync_end = timings->crtc_vsync_end;

	mode->flags = timings->flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = timings->crtc_clock;

	drm_mode_set_name(mode);
}

static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (HAS_GMCH(dev_priv))
		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
		crtc_state->pixel_rate =
			crtc_state->hw.pipe_mode.crtc_clock;
	else
		crtc_state->pixel_rate =
			ilk_pipe_pixel_rate(crtc_state);
}

static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state,
					   struct drm_display_mode *mode)
{
	int num_pipes = intel_bigjoiner_num_pipes(crtc_state);

	if (num_pipes < 2)
		return;

	mode->crtc_clock /= num_pipes;
	mode->crtc_hdisplay /= num_pipes;
	mode->crtc_hblank_start /= num_pipes;
	mode->crtc_hblank_end /= num_pipes;
	mode->crtc_hsync_start /= num_pipes;
	mode->crtc_hsync_end /= num_pipes;
	mode->crtc_htotal /= num_pipes;
}

static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
					  struct drm_display_mode *mode)
{
	int overlap = crtc_state->splitter.pixel_overlap;
	int n = crtc_state->splitter.link_count;

	if (!crtc_state->splitter.enable)
		return;

	/*
	 * eDP MSO uses segment timings from EDID for transcoder
	 * timings, but full mode for everything else.
	 *
	 * h_full = (h_segment - pixel_overlap) * link_count
	 */
	mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
	mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
	mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
	mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
	mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
	mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
	mode->crtc_clock *= n;
}
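/*
 * Worked example for the splitter math above (illustrative numbers
 * only): a 2-link eDP MSO panel driven with 1920 pixel wide segments
 * and a 0 pixel overlap yields crtc_hdisplay = (1920 - 0) * 2 = 3840,
 * with the pixel clock likewise doubled.
 */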
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/*
	 * We want the full numbers in adjusted_mode normal timings,
	 * adjusted_mode crtc timings are left with the raw transcoder
	 * timings.
	 */
	intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);

	/* Populate the "user" mode with full numbers */
	drm_mode_copy(mode, pipe_mode);
	intel_mode_from_crtc_timings(mode, mode);
	mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
		(intel_bigjoiner_num_pipes(crtc_state) ?: 1);
	mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);

	/* Derive per-pipe timings in case bigjoiner is used */
	intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	intel_crtc_compute_pixel_rate(crtc_state);
}

void intel_encoder_get_config(struct intel_encoder *encoder,
			      struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}

static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
	int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
	int width, height;

	if (num_pipes < 2)
		return;

	width = drm_rect_width(&crtc_state->pipe_src);
	height = drm_rect_height(&crtc_state->pipe_src);

	drm_rect_init(&crtc_state->pipe_src, 0, 0,
		      width / num_pipes, height);
}

static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	intel_bigjoiner_compute_pipe_src(crtc_state);

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (drm_rect_width(&crtc_state->pipe_src) & 1) {
		if (crtc_state->double_wide) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
				    crtc->base.base.id, crtc->base.name);
			return -EINVAL;
		}

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(i915)) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
				    crtc->base.base.id, crtc->base.name);
			return -EINVAL;
		}
	}

	return 0;
}
static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	int clock_limit = i915->max_dotclk_freq;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/* Derive per-pipe timings in case bigjoiner is used */
	intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(i915) < 4) {
		clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = i915->max_dotclk_freq;
			crtc_state->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    crtc->base.base.id, crtc->base.name,
			    pipe_mode->crtc_clock, clock_limit,
			    str_yes_no(crtc_state->double_wide));
		return -EINVAL;
	}

	return 0;
}

static int intel_crtc_compute_config(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	ret = intel_dpll_crtc_compute_clock(state, crtc);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_src(crtc_state);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_mode(crtc_state);
	if (ret)
		return ret;

	intel_crtc_compute_pixel_rate(crtc_state);

	if (crtc_state->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, crtc_state);

	return 0;
}

static void
intel_reduce_m_n_ratio(u32 *num, u32 *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void compute_m_n(u32 *ret_m, u32 *ret_n,
			u32 m, u32 n, u32 constant_n)
{
	if (constant_n)
		*ret_n = constant_n;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

void
intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
		       int pixel_clock, int link_clock,
		       int bw_overhead,
		       struct intel_link_m_n *m_n)
{
	u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
	u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
						  bw_overhead);
	u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes);

	/*
	 * Windows/BIOS uses fixed M/N values always. Follow suit.
	 *
	 * Also several DP dongles in particular seem to be fussy
	 * about too large link M/N values. Presumably the 20bit
	 * value used by Windows/BIOS is acceptable to everyone.
	 */
	m_n->tu = 64;
	compute_m_n(&m_n->data_m, &m_n->data_n,
		    data_m, data_n,
		    0x8000000);

	compute_m_n(&m_n->link_m, &m_n->link_n,
		    pixel_clock, link_symbol_clock,
		    0x80000);
}
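/*
 * Illustrative walk-through of the M/N computation above: for link M/N
 * the ratio is pixel_clock / link_symbol_clock, so N is pinned to the
 * fixed 0x80000 value and M becomes
 * div_u64(pixel_clock * 0x80000, link_symbol_clock), after which both
 * are shifted right together until they fit in DATA_LINK_M_N_MASK.
 */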
void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    str_enabled_disabled(bios_lvds_use_ssc),
				    str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
			dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}

void intel_zero_m_n(struct intel_link_m_n *m_n)
{
	/* corresponds to 0 register value */
	memset(m_n, 0, sizeof(*m_n));
	m_n->tu = 1;
}

void intel_set_m_n(struct drm_i915_private *i915,
		   const struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
	intel_de_write(i915, data_n_reg, m_n->data_n);
	intel_de_write(i915, link_m_reg, m_n->link_m);
	/*
	 * On BDW+ writing LINK_N arms the double buffered update
	 * of all the M/N registers, so it must be written last.
	 */
	intel_de_write(i915, link_n_reg, m_n->link_n);
}

bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
				    enum transcoder transcoder)
{
	if (IS_HASWELL(dev_priv))
		return transcoder == TRANSCODER_EDP;

	return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
}

void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5)
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
			      PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
	else
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}

void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
		return;

	intel_set_m_n(dev_priv, m_n,
		      PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
		      PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
}
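/*
 * The M1/N1 set above carries the currently active link configuration;
 * the M2/N2 set (present only on the platforms listed in
 * intel_cpu_transcoder_has_m2_n2()) holds an alternate configuration,
 * used e.g. for seamless DRRS refresh rate switching.
 */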
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vdisplay = adjusted_mode->crtc_vdisplay;
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_start = adjusted_mode->crtc_vblank_start;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/*
	 * VBLANK_START no longer works on ADL+, instead we must use
	 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
	 */
	if (DISPLAY_VER(dev_priv) >= 13) {
		intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder),
			       crtc_vblank_start - crtc_vdisplay);

		/*
		 * VBLANK_START not used by hw, just clear it
		 * to make it stand out in register dumps.
		 */
		crtc_vblank_start = 1;
	}

	if (DISPLAY_VER(dev_priv) >= 4)
		intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
		       HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
		       HTOTAL(adjusted_mode->crtc_htotal - 1));
	intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
		       HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
		       HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
	intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
		       HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
		       HSYNC_END(adjusted_mode->crtc_hsync_end - 1));

	intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));
	intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
		       VBLANK_START(crtc_vblank_start - 1) |
		       VBLANK_END(crtc_vblank_end - 1));
	intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
		       VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
		       VSYNC_END(adjusted_mode->crtc_vsync_end - 1));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, TRANS_VTOTAL(pipe),
			       VACTIVE(crtc_vdisplay - 1) |
			       VTOTAL(crtc_vtotal - 1));
}
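/*
 * Reduced variant of intel_set_transcoder_timings() for updates where
 * only the vertical timings change (e.g. an LRR/refresh rate fastset):
 * it rewrites just TRANS_VBLANK and TRANS_VTOTAL, relying on the
 * double buffering noted below to latch the update at vblank.
 */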
static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;

	crtc_vdisplay = adjusted_mode->crtc_vdisplay;
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_start = adjusted_mode->crtc_vblank_start;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	drm_WARN_ON(&dev_priv->drm, adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE);

	/*
	 * The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode.
	 * But let's write it anyway to keep the state checker happy.
	 */
	intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
		       VBLANK_START(crtc_vblank_start - 1) |
		       VBLANK_END(crtc_vblank_end - 1));
	/*
	 * The double buffer latch point for TRANS_VTOTAL
	 * is the transcoder's undelayed vblank.
	 */
	intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));
}

static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int width = drm_rect_width(&crtc_state->pipe_src);
	int height = drm_rect_height(&crtc_state->pipe_src);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	intel_de_write(dev_priv, PIPESRC(pipe),
		       PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}

static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) == 2)
		return false;

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
	else
		return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
}

static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	u32 tmp;

	tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder));
	adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder));
		adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
	}

	tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder));
	adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;

	tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
	adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;

	/* FIXME TGL+ DSI transcoders have this! */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
		adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
	}
	tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
	adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;

	if (intel_pipe_is_interlaced(pipe_config)) {
		adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
		adjusted_mode->crtc_vtotal += 1;
		adjusted_mode->crtc_vblank_end += 1;
	}

	if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
		adjusted_mode->crtc_vblank_start =
			adjusted_mode->crtc_vdisplay +
			intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder));
}
static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
	enum pipe master_pipe, pipe = crtc->pipe;
	int width;

	if (num_pipes < 2)
		return;

	master_pipe = bigjoiner_master_pipe(crtc_state);
	width = drm_rect_width(&crtc_state->pipe_src);

	drm_rect_translate_to(&crtc_state->pipe_src,
			      (pipe - master_pipe) * width, 0);
}

static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));

	drm_rect_init(&pipe_config->pipe_src, 0, 0,
		      REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
		      REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);

	intel_bigjoiner_adjust_pipe_src(pipe_config);
}

void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - We keep both pipes enabled on 830
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	if (crtc_state->double_wide)
		val |= TRANSCONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			val |= TRANSCONF_DITHER_EN |
				TRANSCONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			MISSING_CASE(crtc_state->pipe_bpp);
			fallthrough;
		case 18:
			val |= TRANSCONF_BPC_6;
			break;
		case 24:
			val |= TRANSCONF_BPC_8;
			break;
		case 30:
			val |= TRANSCONF_BPC_10;
			break;
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (DISPLAY_VER(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
		else
			val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		val |= TRANSCONF_INTERLACE_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    crtc_state->limited_color_range)
		val |= TRANSCONF_COLOR_RANGE_SELECT;

	val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);

	if (crtc_state->wgc_enable)
		val |= TRANSCONF_WGC_ENABLE;

	val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
	intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return DISPLAY_VER(dev_priv) >= 4 ||
		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe;
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (DISPLAY_VER(dev_priv) >= 4)
		pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp);
	else
		pipe = PIPE_B;

	if (pipe != crtc->pipe)
		return;

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}

static enum intel_output_format
bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));

	if (tmp & PIPE_MISC_YUV420_ENABLE) {
		/* We support 4:2:0 in full blend mode only */
		drm_WARN_ON(&dev_priv->drm,
			    (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);

		return INTEL_OUTPUT_FORMAT_YCBCR420;
	} else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	} else {
		return INTEL_OUTPUT_FORMAT_RGB;
	}
}
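/*
 * Hardware readout for pre-ILK (GMCH) platforms: fills @pipe_config
 * from the registers and returns whether the pipe is actually enabled.
 * A false return also covers the case where the pipe's power domain is
 * off, in which case no registers are touched at all.
 */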
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->sink_format = pipe_config->output_format;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
	if (!(tmp & TRANSCONF_ENABLE))
		goto out;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & TRANSCONF_BPC_MASK) {
		case TRANSCONF_BPC_6:
			pipe_config->pipe_bpp = 18;
			break;
		case TRANSCONF_BPC_8:
			pipe_config->pipe_bpp = 24;
			break;
		case TRANSCONF_BPC_10:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			MISSING_CASE(tmp);
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & TRANSCONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp);

	pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & TRANSCONF_WGC_ENABLE))
		pipe_config->wgc_enable = true;

	intel_color_get_config(pipe_config);

	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (!intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	switch (crtc_state->pipe_bpp) {
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		MISSING_CASE(crtc_state->pipe_bpp);
		fallthrough;
	case 18:
		val |= TRANSCONF_BPC_6;
		break;
	case 24:
		val |= TRANSCONF_BPC_8;
		break;
	case 30:
		val |= TRANSCONF_BPC_10;
		break;
	case 36:
		val |= TRANSCONF_BPC_12;
		break;
	}

	if (crtc_state->dither)
		val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= TRANSCONF_INTERLACE_IF_ID_ILK;
	else
		val |= TRANSCONF_INTERLACE_PF_PD_ILK;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= TRANSCONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709;

	val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
	val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
	intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}

static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (!intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	if (IS_HASWELL(dev_priv) && crtc_state->dither)
		val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= TRANSCONF_INTERLACE_IF_ID_ILK;
	else
		val |= TRANSCONF_INTERLACE_PF_PD_ILK;

	if (IS_HASWELL(dev_priv) &&
	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
	intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPE_MISC_BPC_6;
		break;
	case 24:
		val |= PIPE_MISC_BPC_8;
		break;
	case 30:
		val |= PIPE_MISC_BPC_10;
		break;
	case 36:
		/* Port output 12BPC defined for ADLP+ */
		if (DISPLAY_VER(dev_priv) >= 13)
			val |= PIPE_MISC_BPC_12_ADLP;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPE_MISC_YUV420_ENABLE |
			PIPE_MISC_YUV420_MODE_FULL_BLEND;

	if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
		val |= PIPE_MISC_HDR_MODE_PRECISION;

	if (DISPLAY_VER(dev_priv) >= 12)
		val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;

	/* allow PSR with sprite enabled */
	if (IS_BROADWELL(dev_priv))
		val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE;

	intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val);
}

int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));

	switch (tmp & PIPE_MISC_BPC_MASK) {
	case PIPE_MISC_BPC_6:
		return 18;
	case PIPE_MISC_BPC_8:
		return 24;
	case PIPE_MISC_BPC_10:
		return 30;
	/*
	 * PORT OUTPUT 12 BPC defined for ADLP+.
	 *
	 * TODO:
	 * For previous platforms with DSI interface, bits 5:7
	 * are used for storing pipe_bpp irrespective of dithering.
	 * Since the value of 12 BPC is not defined for these bits
	 * on older platforms, need to find a workaround for 12 BPC
	 * MIPI DSI HW readout.
	 */
	case PIPE_MISC_BPC_12_ADLP:
		if (DISPLAY_VER(dev_priv) >= 13)
			return 36;
		fallthrough;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}
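/*
 * Worked example for ilk_get_lanes_required() below (illustrative
 * numbers only): target_clock = 148500 kHz at bpp = 24 over a
 * 270000 kHz link gives bps = 148500 * 24 * 21 / 20 = 3742200, and
 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
 */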
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

void intel_get_m_n(struct drm_i915_private *i915,
		   struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
	m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
	m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
	m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
	m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
}

void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5)
		intel_get_m_n(dev_priv, m_n,
			      PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
			      PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
	else
		intel_get_m_n(dev_priv, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}

void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
		return;

	intel_get_m_n(dev_priv, m_n,
		      PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
		      PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
}

static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;
	enum pipe pipe;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl);
	else
		pipe = crtc->pipe;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	drm_rect_init(&crtc_state->pch_pfit.dst,
		      REG_FIELD_GET(PF_WIN_XPOS_MASK, pos),
		      REG_FIELD_GET(PF_WIN_YPOS_MASK, pos),
		      REG_FIELD_GET(PF_WIN_XSIZE_MASK, size),
		      REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe);
}
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
	if (!(tmp & TRANSCONF_ENABLE))
		goto out;

	switch (tmp & TRANSCONF_BPC_MASK) {
	case TRANSCONF_BPC_6:
		pipe_config->pipe_bpp = 18;
		break;
	case TRANSCONF_BPC_8:
		pipe_config->pipe_bpp = 24;
		break;
	case TRANSCONF_BPC_10:
		pipe_config->pipe_bpp = 30;
		break;
	case TRANSCONF_BPC_12:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
	case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
	case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->sink_format = pipe_config->output_format;

	pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);

	pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;

	pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);

	intel_color_get_config(pipe_config);

	pipe_config->pixel_multiplier = 1;

	ilk_pch_get_config(pipe_config);

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

static u8 bigjoiner_pipes(struct drm_i915_private *i915)
{
	u8 pipes;

	if (DISPLAY_VER(i915) >= 12)
		pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
	else if (DISPLAY_VER(i915) >= 11)
		pipes = BIT(PIPE_B) | BIT(PIPE_C);
	else
		pipes = 0;

	return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask;
}

static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
					   enum transcoder cpu_transcoder)
{
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp = 0;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);

	with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

	return tmp & TRANS_DDI_FUNC_ENABLE;
}
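/*
 * Determine which pipes are currently acting as bigjoiner masters and
 * slaves by reading the per-pipe DSS control registers: the compressed
 * (DSC) bigjoiner bits live behind the DSC power domain, while the
 * ADL-P+ uncompressed joiner bits only need the pipe power domain.
 */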
static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv,
				    u8 *master_pipes, u8 *slave_pipes)
{
	struct intel_crtc *crtc;

	*master_pipes = 0;
	*slave_pipes = 0;

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
					 bigjoiner_pipes(dev_priv)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (!(tmp & BIG_JOINER_ENABLE))
				continue;

			if (tmp & MASTER_BIG_JOINER_ENABLE)
				*master_pipes |= BIT(pipe);
			else
				*slave_pipes |= BIT(pipe);
		}

		if (DISPLAY_VER(dev_priv) < 13)
			continue;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (tmp & UNCOMPRESSED_JOINER_MASTER)
				*master_pipes |= BIT(pipe);
			if (tmp & UNCOMPRESSED_JOINER_SLAVE)
				*slave_pipes |= BIT(pipe);
		}
	}

	/* Bigjoiner pipes should always be consecutive master and slave */
	drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1,
		 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
		 *master_pipes, *slave_pipes);
}

static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
{
	if ((slave_pipes & BIT(pipe)) == 0)
		return pipe;

	/* ignore everything above our pipe */
	master_pipes &= ~GENMASK(7, pipe);

	/* highest remaining bit should be our master pipe */
	return fls(master_pipes) - 1;
}

static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
{
	enum pipe master_pipe, next_master_pipe;

	master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes);

	if ((master_pipes & BIT(master_pipe)) == 0)
		return 0;

	/* ignore our master pipe and everything below it */
	master_pipes &= ~GENMASK(master_pipe, 0);
	/* make sure a high bit is set for the ffs() */
	master_pipes |= BIT(7);
	/* lowest remaining bit should be the next master pipe */
	next_master_pipe = ffs(master_pipes) - 1;

	return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe);
}
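/*
 * Worked example for the two helpers above: with master_pipes = 0b0101
 * (A and C) and slave_pipes = 0b1010 (B and D),
 * get_bigjoiner_master_pipe() maps pipe B to master A, and
 * get_bigjoiner_slave_pipes(PIPE_A, ...) masks off everything from the
 * next master (C) upwards, returning just BIT(PIPE_B).
 */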
3537 */ 3538 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, 3539 panel_transcoder_mask) { 3540 enum intel_display_power_domain power_domain; 3541 intel_wakeref_t wakeref; 3542 enum pipe trans_pipe; 3543 u32 tmp = 0; 3544 3545 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3546 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) 3547 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); 3548 3549 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 3550 continue; 3551 3552 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 3553 default: 3554 drm_WARN(dev, 1, 3555 "unknown pipe linked to transcoder %s\n", 3556 transcoder_name(cpu_transcoder)); 3557 fallthrough; 3558 case TRANS_DDI_EDP_INPUT_A_ONOFF: 3559 case TRANS_DDI_EDP_INPUT_A_ON: 3560 trans_pipe = PIPE_A; 3561 break; 3562 case TRANS_DDI_EDP_INPUT_B_ONOFF: 3563 trans_pipe = PIPE_B; 3564 break; 3565 case TRANS_DDI_EDP_INPUT_C_ONOFF: 3566 trans_pipe = PIPE_C; 3567 break; 3568 case TRANS_DDI_EDP_INPUT_D_ONOFF: 3569 trans_pipe = PIPE_D; 3570 break; 3571 } 3572 3573 if (trans_pipe == crtc->pipe) 3574 enabled_transcoders |= BIT(cpu_transcoder); 3575 } 3576 3577 /* single pipe or bigjoiner master */ 3578 cpu_transcoder = (enum transcoder) crtc->pipe; 3579 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 3580 enabled_transcoders |= BIT(cpu_transcoder); 3581 3582 /* bigjoiner slave -> consider the master pipe's transcoder as well */ 3583 enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes); 3584 if (slave_pipes & BIT(crtc->pipe)) { 3585 cpu_transcoder = (enum transcoder) 3586 get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes); 3587 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 3588 enabled_transcoders |= BIT(cpu_transcoder); 3589 } 3590 3591 return enabled_transcoders; 3592 } 3593 3594 static bool has_edp_transcoders(u8 enabled_transcoders) 3595 { 3596 return enabled_transcoders & BIT(TRANSCODER_EDP); 3597 } 3598 3599 static bool has_dsi_transcoders(u8 enabled_transcoders) 3600 { 3601 return enabled_transcoders & (BIT(TRANSCODER_DSI_0) | 3602 BIT(TRANSCODER_DSI_1)); 3603 } 3604 3605 static bool has_pipe_transcoders(u8 enabled_transcoders) 3606 { 3607 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) | 3608 BIT(TRANSCODER_DSI_0) | 3609 BIT(TRANSCODER_DSI_1)); 3610 } 3611 3612 static void assert_enabled_transcoders(struct drm_i915_private *i915, 3613 u8 enabled_transcoders) 3614 { 3615 /* Only one type of transcoder please */ 3616 drm_WARN_ON(&i915->drm, 3617 has_edp_transcoders(enabled_transcoders) + 3618 has_dsi_transcoders(enabled_transcoders) + 3619 has_pipe_transcoders(enabled_transcoders) > 1); 3620 3621 /* Only DSI transcoders can be ganged */ 3622 drm_WARN_ON(&i915->drm, 3623 !has_dsi_transcoders(enabled_transcoders) && 3624 !is_power_of_2(enabled_transcoders)); 3625 } 3626 3627 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 3628 struct intel_crtc_state *pipe_config, 3629 struct intel_display_power_domain_set *power_domain_set) 3630 { 3631 struct drm_device *dev = crtc->base.dev; 3632 struct drm_i915_private *dev_priv = to_i915(dev); 3633 unsigned long enabled_transcoders; 3634 u32 tmp; 3635 3636 enabled_transcoders = hsw_enabled_transcoders(crtc); 3637 if (!enabled_transcoders) 3638 return false; 3639 3640 assert_enabled_transcoders(dev_priv, enabled_transcoders); 3641 3642 /* 3643 * With the exception of DSI we should only ever have 3644 * a single enabled transcoder. With DSI let's just 3645 * pick the first one. 
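 * (ffs() is 1-based, so "ffs(enabled_transcoders) - 1" below resolves
 * to the lowest-numbered enabled transcoder.)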
3646 */ 3647 pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1; 3648 3649 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 3650 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 3651 return false; 3652 3653 if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) { 3654 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); 3655 3656 if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF) 3657 pipe_config->pch_pfit.force_thru = true; 3658 } 3659 3660 tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); 3661 3662 return tmp & TRANSCONF_ENABLE; 3663 } 3664 3665 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 3666 struct intel_crtc_state *pipe_config, 3667 struct intel_display_power_domain_set *power_domain_set) 3668 { 3669 struct drm_device *dev = crtc->base.dev; 3670 struct drm_i915_private *dev_priv = to_i915(dev); 3671 enum transcoder cpu_transcoder; 3672 enum port port; 3673 u32 tmp; 3674 3675 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 3676 if (port == PORT_A) 3677 cpu_transcoder = TRANSCODER_DSI_A; 3678 else 3679 cpu_transcoder = TRANSCODER_DSI_C; 3680 3681 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 3682 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) 3683 continue; 3684 3685 /* 3686 * The PLL needs to be enabled with a valid divider 3687 * configuration, otherwise accessing DSI registers will hang 3688 * the machine. See BSpec North Display Engine 3689 * registers/MIPI[BXT]. We can break out here early, since we 3690 * need the same DSI PLL to be enabled for both DSI ports. 3691 */ 3692 if (!bxt_dsi_pll_is_enabled(dev_priv)) 3693 break; 3694 3695 /* XXX: this works for video mode only */ 3696 tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)); 3697 if (!(tmp & DPI_ENABLE)) 3698 continue; 3699 3700 tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); 3701 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 3702 continue; 3703 3704 pipe_config->cpu_transcoder = cpu_transcoder; 3705 break; 3706 } 3707 3708 return transcoder_is_dsi(pipe_config->cpu_transcoder); 3709 } 3710 3711 static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state) 3712 { 3713 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3714 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 3715 u8 master_pipes, slave_pipes; 3716 enum pipe pipe = crtc->pipe; 3717 3718 enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes); 3719 3720 if (((master_pipes | slave_pipes) & BIT(pipe)) == 0) 3721 return; 3722 3723 crtc_state->bigjoiner_pipes = 3724 BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) | 3725 get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes); 3726 } 3727 3728 static bool hsw_get_pipe_config(struct intel_crtc *crtc, 3729 struct intel_crtc_state *pipe_config) 3730 { 3731 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3732 bool active; 3733 u32 tmp; 3734 3735 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains, 3736 POWER_DOMAIN_PIPE(crtc->pipe))) 3737 return false; 3738 3739 pipe_config->shared_dpll = NULL; 3740 3741 active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains); 3742 3743 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 3744 bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) { 3745 drm_WARN_ON(&dev_priv->drm, active); 3746 active = true; 3747 } 3748 3749 if 
(!active) 3750 goto out; 3751 3752 intel_bigjoiner_get_config(pipe_config); 3753 intel_dsc_get_config(pipe_config); 3754 3755 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 3756 DISPLAY_VER(dev_priv) >= 11) 3757 intel_get_transcoder_timings(crtc, pipe_config); 3758 3759 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) 3760 intel_vrr_get_config(pipe_config); 3761 3762 intel_get_pipe_src_size(crtc, pipe_config); 3763 3764 if (IS_HASWELL(dev_priv)) { 3765 u32 tmp = intel_de_read(dev_priv, 3766 TRANSCONF(pipe_config->cpu_transcoder)); 3767 3768 if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW) 3769 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 3770 else 3771 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3772 } else { 3773 pipe_config->output_format = 3774 bdw_get_pipe_misc_output_format(crtc); 3775 } 3776 3777 pipe_config->sink_format = pipe_config->output_format; 3778 3779 intel_color_get_config(pipe_config); 3780 3781 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe)); 3782 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp); 3783 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 3784 pipe_config->ips_linetime = 3785 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); 3786 3787 if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains, 3788 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) { 3789 if (DISPLAY_VER(dev_priv) >= 9) 3790 skl_scaler_get_config(pipe_config); 3791 else 3792 ilk_get_pfit_config(pipe_config); 3793 } 3794 3795 hsw_ips_get_config(pipe_config); 3796 3797 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 3798 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 3799 pipe_config->pixel_multiplier = 3800 intel_de_read(dev_priv, 3801 TRANS_MULT(pipe_config->cpu_transcoder)) + 1; 3802 } else { 3803 pipe_config->pixel_multiplier = 1; 3804 } 3805 3806 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { 3807 tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder)); 3808 3809 pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; 3810 } else { 3811 /* no idea if this is correct */ 3812 pipe_config->framestart_delay = 1; 3813 } 3814 3815 out: 3816 intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains); 3817 3818 return active; 3819 } 3820 3821 bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) 3822 { 3823 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3824 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 3825 3826 if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state)) 3827 return false; 3828 3829 crtc_state->hw.active = true; 3830 3831 intel_crtc_readout_derived_state(crtc_state); 3832 3833 return true; 3834 } 3835 3836 int intel_dotclock_calculate(int link_freq, 3837 const struct intel_link_m_n *m_n) 3838 { 3839 /*
3840 * The calculation for the data clock -> pixel clock is:
3841 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
3842 * But we want to avoid losing precision if possible, so:
3843 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
3844 *
3845 * and for link freq (10 kbit/s units) -> pixel clock it is:
3846 * link_symbol_clock = link_freq * 10 / link_symbol_size
3847 * pixel_clock = (m * link_symbol_clock) / n
3848 * or for more precision:
3849 * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size)
3850 */ 3851 3852 if (!m_n->link_n) 3853 return 0; 3854 3855 return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10), 3856
m_n->link_n * intel_dp_link_symbol_size(link_freq)); 3857 } 3858 3859 int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) 3860 { 3861 int dotclock; 3862 3863 if (intel_crtc_has_dp_encoder(pipe_config)) 3864 dotclock = intel_dotclock_calculate(pipe_config->port_clock, 3865 &pipe_config->dp_m_n); 3866 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) 3867 dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24, 3868 pipe_config->pipe_bpp); 3869 else 3870 dotclock = pipe_config->port_clock; 3871 3872 if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && 3873 !intel_crtc_has_dp_encoder(pipe_config)) 3874 dotclock *= 2; 3875 3876 if (pipe_config->pixel_multiplier) 3877 dotclock /= pipe_config->pixel_multiplier; 3878 3879 return dotclock; 3880 } 3881 3882 /* Returns the currently programmed mode of the given encoder. */ 3883 struct drm_display_mode * 3884 intel_encoder_current_mode(struct intel_encoder *encoder) 3885 { 3886 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3887 struct intel_crtc_state *crtc_state; 3888 struct drm_display_mode *mode; 3889 struct intel_crtc *crtc; 3890 enum pipe pipe; 3891 3892 if (!encoder->get_hw_state(encoder, &pipe)) 3893 return NULL; 3894 3895 crtc = intel_crtc_for_pipe(dev_priv, pipe); 3896 3897 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 3898 if (!mode) 3899 return NULL; 3900 3901 crtc_state = intel_crtc_state_alloc(crtc); 3902 if (!crtc_state) { 3903 kfree(mode); 3904 return NULL; 3905 } 3906 3907 if (!intel_crtc_get_pipe_config(crtc_state)) { 3908 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); 3909 kfree(mode); 3910 return NULL; 3911 } 3912 3913 intel_encoder_get_config(encoder, crtc_state); 3914 3915 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode); 3916 3917 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); 3918 3919 return mode; 3920 } 3921 3922 static bool encoders_cloneable(const struct intel_encoder *a, 3923 const struct intel_encoder *b) 3924 { 3925 /* masks could be asymmetric, so check both ways */ 3926 return a == b || (a->cloneable & BIT(b->type) && 3927 b->cloneable & BIT(a->type)); 3928 } 3929 3930 static bool check_single_encoder_cloning(struct intel_atomic_state *state, 3931 struct intel_crtc *crtc, 3932 struct intel_encoder *encoder) 3933 { 3934 struct intel_encoder *source_encoder; 3935 struct drm_connector *connector; 3936 struct drm_connector_state *connector_state; 3937 int i; 3938 3939 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 3940 if (connector_state->crtc != &crtc->base) 3941 continue; 3942 3943 source_encoder = 3944 to_intel_encoder(connector_state->best_encoder); 3945 if (!encoders_cloneable(encoder, source_encoder)) 3946 return false; 3947 } 3948 3949 return true; 3950 } 3951 3952 static int icl_add_linked_planes(struct intel_atomic_state *state) 3953 { 3954 struct intel_plane *plane, *linked; 3955 struct intel_plane_state *plane_state, *linked_plane_state; 3956 int i; 3957 3958 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 3959 linked = plane_state->planar_linked_plane; 3960 3961 if (!linked) 3962 continue; 3963 3964 linked_plane_state = intel_atomic_get_plane_state(state, linked); 3965 if (IS_ERR(linked_plane_state)) 3966 return PTR_ERR(linked_plane_state); 3967 3968 drm_WARN_ON(state->base.dev, 3969 linked_plane_state->planar_linked_plane != plane); 3970 drm_WARN_ON(state->base.dev, 3971 linked_plane_state->planar_slave == plane_state->planar_slave); 3972 } 3973 3974 return 0; 
3975 } 3976 3977 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) 3978 { 3979 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3980 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3981 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); 3982 struct intel_plane *plane, *linked; 3983 struct intel_plane_state *plane_state; 3984 int i; 3985 3986 if (DISPLAY_VER(dev_priv) < 11) 3987 return 0; 3988 3989 /* 3990 * Destroy all old plane links and make the slave plane invisible 3991 * in the crtc_state->active_planes mask. 3992 */ 3993 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 3994 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) 3995 continue; 3996 3997 plane_state->planar_linked_plane = NULL; 3998 if (plane_state->planar_slave && !plane_state->uapi.visible) { 3999 crtc_state->enabled_planes &= ~BIT(plane->id); 4000 crtc_state->active_planes &= ~BIT(plane->id); 4001 crtc_state->update_planes |= BIT(plane->id); 4002 crtc_state->data_rate[plane->id] = 0; 4003 crtc_state->rel_data_rate[plane->id] = 0; 4004 } 4005 4006 plane_state->planar_slave = false; 4007 } 4008 4009 if (!crtc_state->nv12_planes) 4010 return 0; 4011 4012 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4013 struct intel_plane_state *linked_state = NULL; 4014 4015 if (plane->pipe != crtc->pipe || 4016 !(crtc_state->nv12_planes & BIT(plane->id))) 4017 continue; 4018 4019 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 4020 if (!icl_is_nv12_y_plane(dev_priv, linked->id)) 4021 continue; 4022 4023 if (crtc_state->active_planes & BIT(linked->id)) 4024 continue; 4025 4026 linked_state = intel_atomic_get_plane_state(state, linked); 4027 if (IS_ERR(linked_state)) 4028 return PTR_ERR(linked_state); 4029 4030 break; 4031 } 4032 4033 if (!linked_state) { 4034 drm_dbg_kms(&dev_priv->drm, 4035 "Need %d free Y planes for planar YUV\n", 4036 hweight8(crtc_state->nv12_planes)); 4037 4038 return -EINVAL; 4039 } 4040 4041 plane_state->planar_linked_plane = linked; 4042 4043 linked_state->planar_slave = true; 4044 linked_state->planar_linked_plane = plane; 4045 crtc_state->enabled_planes |= BIT(linked->id); 4046 crtc_state->active_planes |= BIT(linked->id); 4047 crtc_state->update_planes |= BIT(linked->id); 4048 crtc_state->data_rate[linked->id] = 4049 crtc_state->data_rate_y[plane->id]; 4050 crtc_state->rel_data_rate[linked->id] = 4051 crtc_state->rel_data_rate_y[plane->id]; 4052 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n", 4053 linked->base.name, plane->base.name); 4054 4055 /* Copy parameters to slave plane */ 4056 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; 4057 linked_state->color_ctl = plane_state->color_ctl; 4058 linked_state->view = plane_state->view; 4059 linked_state->decrypt = plane_state->decrypt; 4060 4061 intel_plane_copy_hw_state(linked_state, plane_state); 4062 linked_state->uapi.src = plane_state->uapi.src; 4063 linked_state->uapi.dst = plane_state->uapi.dst; 4064 4065 if (icl_is_hdr_plane(dev_priv, plane->id)) { 4066 if (linked->id == PLANE_SPRITE5) 4067 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL; 4068 else if (linked->id == PLANE_SPRITE4) 4069 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL; 4070 else if (linked->id == PLANE_SPRITE3) 4071 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL; 4072 else if (linked->id == PLANE_SPRITE2) 4073 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL; 4074 else 4075 MISSING_CASE(linked->id); 4076 } 4077 } 4078 
4079 return 0; 4080 } 4081 4082 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) 4083 { 4084 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 4085 struct intel_atomic_state *state = 4086 to_intel_atomic_state(new_crtc_state->uapi.state); 4087 const struct intel_crtc_state *old_crtc_state = 4088 intel_atomic_get_old_crtc_state(state, crtc); 4089 4090 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; 4091 } 4092 4093 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) 4094 { 4095 const struct drm_display_mode *pipe_mode = 4096 &crtc_state->hw.pipe_mode; 4097 int linetime_wm; 4098 4099 if (!crtc_state->hw.enable) 4100 return 0; 4101 4102 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4103 pipe_mode->crtc_clock); 4104 4105 return min(linetime_wm, 0x1ff); 4106 } 4107 4108 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, 4109 const struct intel_cdclk_state *cdclk_state) 4110 { 4111 const struct drm_display_mode *pipe_mode = 4112 &crtc_state->hw.pipe_mode; 4113 int linetime_wm; 4114 4115 if (!crtc_state->hw.enable) 4116 return 0; 4117 4118 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4119 cdclk_state->logical.cdclk); 4120 4121 return min(linetime_wm, 0x1ff); 4122 } 4123 4124 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) 4125 { 4126 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4127 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4128 const struct drm_display_mode *pipe_mode = 4129 &crtc_state->hw.pipe_mode; 4130 int linetime_wm; 4131 4132 if (!crtc_state->hw.enable) 4133 return 0; 4134 4135 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8, 4136 crtc_state->pixel_rate); 4137 4138 /* Display WA #1135: BXT:ALL GLK:ALL */ 4139 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 4140 skl_watermark_ipc_enabled(dev_priv)) 4141 linetime_wm /= 2; 4142 4143 return min(linetime_wm, 0x1ff); 4144 } 4145 4146 static int hsw_compute_linetime_wm(struct intel_atomic_state *state, 4147 struct intel_crtc *crtc) 4148 { 4149 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4150 struct intel_crtc_state *crtc_state = 4151 intel_atomic_get_new_crtc_state(state, crtc); 4152 const struct intel_cdclk_state *cdclk_state; 4153 4154 if (DISPLAY_VER(dev_priv) >= 9) 4155 crtc_state->linetime = skl_linetime_wm(crtc_state); 4156 else 4157 crtc_state->linetime = hsw_linetime_wm(crtc_state); 4158 4159 if (!hsw_crtc_supports_ips(crtc)) 4160 return 0; 4161 4162 cdclk_state = intel_atomic_get_cdclk_state(state); 4163 if (IS_ERR(cdclk_state)) 4164 return PTR_ERR(cdclk_state); 4165 4166 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state, 4167 cdclk_state); 4168 4169 return 0; 4170 } 4171 4172 static int intel_crtc_atomic_check(struct intel_atomic_state *state, 4173 struct intel_crtc *crtc) 4174 { 4175 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4176 struct intel_crtc_state *crtc_state = 4177 intel_atomic_get_new_crtc_state(state, crtc); 4178 int ret; 4179 4180 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) && 4181 intel_crtc_needs_modeset(crtc_state) && 4182 !crtc_state->hw.active) 4183 crtc_state->update_wm_post = true; 4184 4185 if (intel_crtc_needs_modeset(crtc_state)) { 4186 ret = intel_dpll_crtc_get_shared_dpll(state, crtc); 4187 if (ret) 4188 return ret; 4189 } 4190 4191 /* 4192 * May need to update pipe gamma enable bits 4193 * when C8 planes are getting enabled/disabled. 
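 * (C8 is the 8-bit indexed/palette format; such planes borrow the
 * pipe's legacy LUT as their palette, hence the color management
 * recompute whenever they appear or disappear.)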
4194 */ 4195 if (c8_planes_changed(crtc_state)) 4196 crtc_state->uapi.color_mgmt_changed = true; 4197 4198 if (intel_crtc_needs_color_update(crtc_state)) { 4199 ret = intel_color_check(crtc_state); 4200 if (ret) 4201 return ret; 4202 } 4203 4204 ret = intel_compute_pipe_wm(state, crtc); 4205 if (ret) { 4206 drm_dbg_kms(&dev_priv->drm, 4207 "Target pipe watermarks are invalid\n"); 4208 return ret; 4209 } 4210 4211 /* 4212 * Calculate 'intermediate' watermarks that satisfy both the 4213 * old state and the new state. We can program these 4214 * immediately. 4215 */ 4216 ret = intel_compute_intermediate_wm(state, crtc); 4217 if (ret) { 4218 drm_dbg_kms(&dev_priv->drm, 4219 "No valid intermediate pipe watermarks are possible\n"); 4220 return ret; 4221 } 4222 4223 if (DISPLAY_VER(dev_priv) >= 9) { 4224 if (intel_crtc_needs_modeset(crtc_state) || 4225 intel_crtc_needs_fastset(crtc_state)) { 4226 ret = skl_update_scaler_crtc(crtc_state); 4227 if (ret) 4228 return ret; 4229 } 4230 4231 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state); 4232 if (ret) 4233 return ret; 4234 } 4235 4236 if (HAS_IPS(dev_priv)) { 4237 ret = hsw_ips_compute_config(state, crtc); 4238 if (ret) 4239 return ret; 4240 } 4241 4242 if (DISPLAY_VER(dev_priv) >= 9 || 4243 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 4244 ret = hsw_compute_linetime_wm(state, crtc); 4245 if (ret) 4246 return ret; 4247 4248 } 4249 4250 ret = intel_psr2_sel_fetch_update(state, crtc); 4251 if (ret) 4252 return ret; 4253 4254 return 0; 4255 } 4256 4257 static int 4258 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 4259 struct intel_crtc_state *crtc_state) 4260 { 4261 struct drm_connector *connector = conn_state->connector; 4262 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 4263 const struct drm_display_info *info = &connector->display_info; 4264 int bpp; 4265 4266 switch (conn_state->max_bpc) { 4267 case 6 ... 7: 4268 bpp = 6 * 3; 4269 break; 4270 case 8 ... 9: 4271 bpp = 8 * 3; 4272 break; 4273 case 10 ... 11: 4274 bpp = 10 * 3; 4275 break; 4276 case 12 ... 
16: 4277 bpp = 12 * 3; 4278 break; 4279 default: 4280 MISSING_CASE(conn_state->max_bpc); 4281 return -EINVAL; 4282 } 4283 4284 if (bpp < crtc_state->pipe_bpp) { 4285 drm_dbg_kms(&i915->drm, 4286 "[CONNECTOR:%d:%s] Limiting display bpp to %d " 4287 "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n", 4288 connector->base.id, connector->name, 4289 bpp, 3 * info->bpc, 4290 3 * conn_state->max_requested_bpc, 4291 crtc_state->pipe_bpp); 4292 4293 crtc_state->pipe_bpp = bpp; 4294 } 4295 4296 return 0; 4297 } 4298 4299 static int 4300 compute_baseline_pipe_bpp(struct intel_atomic_state *state, 4301 struct intel_crtc *crtc) 4302 { 4303 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4304 struct intel_crtc_state *crtc_state = 4305 intel_atomic_get_new_crtc_state(state, crtc); 4306 struct drm_connector *connector; 4307 struct drm_connector_state *connector_state; 4308 int bpp, i; 4309 4310 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 4311 IS_CHERRYVIEW(dev_priv))) 4312 bpp = 10*3; 4313 else if (DISPLAY_VER(dev_priv) >= 5) 4314 bpp = 12*3; 4315 else 4316 bpp = 8*3; 4317 4318 crtc_state->pipe_bpp = bpp; 4319 4320 /* Clamp display bpp to connector max bpp */ 4321 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4322 int ret; 4323 4324 if (connector_state->crtc != &crtc->base) 4325 continue; 4326 4327 ret = compute_sink_pipe_bpp(connector_state, crtc_state); 4328 if (ret) 4329 return ret; 4330 } 4331 4332 return 0; 4333 } 4334 4335 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 4336 { 4337 struct drm_device *dev = state->base.dev; 4338 struct drm_connector *connector; 4339 struct drm_connector_list_iter conn_iter; 4340 unsigned int used_ports = 0; 4341 unsigned int used_mst_ports = 0; 4342 bool ret = true; 4343 4344 /* 4345 * We're going to peek into connector->state, 4346 * hence connection_mutex must be held. 4347 */ 4348 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); 4349 4350 /* 4351 * Walk the connector list instead of the encoder 4352 * list to detect the problem on ddi platforms 4353 * where there's just one encoder per digital port. 
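 * (E.g. an HDMI connector and a DP connector can share a single DDI
 * port; if the new state would assign both to the same port at once,
 * the used_ports accounting below reports the conflict.)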
4354 */ 4355 drm_connector_list_iter_begin(dev, &conn_iter); 4356 drm_for_each_connector_iter(connector, &conn_iter) { 4357 struct drm_connector_state *connector_state; 4358 struct intel_encoder *encoder; 4359 4360 connector_state = 4361 drm_atomic_get_new_connector_state(&state->base, 4362 connector); 4363 if (!connector_state) 4364 connector_state = connector->state; 4365 4366 if (!connector_state->best_encoder) 4367 continue; 4368 4369 encoder = to_intel_encoder(connector_state->best_encoder); 4370 4371 drm_WARN_ON(dev, !connector_state->crtc); 4372 4373 switch (encoder->type) { 4374 case INTEL_OUTPUT_DDI: 4375 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) 4376 break; 4377 fallthrough; 4378 case INTEL_OUTPUT_DP: 4379 case INTEL_OUTPUT_HDMI: 4380 case INTEL_OUTPUT_EDP: 4381 /* the same port mustn't appear more than once */ 4382 if (used_ports & BIT(encoder->port)) 4383 ret = false; 4384 4385 used_ports |= BIT(encoder->port); 4386 break; 4387 case INTEL_OUTPUT_DP_MST: 4388 used_mst_ports |= 4389 1 << encoder->port; 4390 break; 4391 default: 4392 break; 4393 } 4394 } 4395 drm_connector_list_iter_end(&conn_iter); 4396 4397 /* can't mix MST and SST/HDMI on the same port */ 4398 if (used_ports & used_mst_ports) 4399 return false; 4400 4401 return ret; 4402 } 4403 4404 static void 4405 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, 4406 struct intel_crtc *crtc) 4407 { 4408 struct intel_crtc_state *crtc_state = 4409 intel_atomic_get_new_crtc_state(state, crtc); 4410 4411 WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); 4412 4413 drm_property_replace_blob(&crtc_state->hw.degamma_lut, 4414 crtc_state->uapi.degamma_lut); 4415 drm_property_replace_blob(&crtc_state->hw.gamma_lut, 4416 crtc_state->uapi.gamma_lut); 4417 drm_property_replace_blob(&crtc_state->hw.ctm, 4418 crtc_state->uapi.ctm); 4419 } 4420 4421 static void 4422 intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state, 4423 struct intel_crtc *crtc) 4424 { 4425 struct intel_crtc_state *crtc_state = 4426 intel_atomic_get_new_crtc_state(state, crtc); 4427 4428 WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); 4429 4430 crtc_state->hw.enable = crtc_state->uapi.enable; 4431 crtc_state->hw.active = crtc_state->uapi.active; 4432 drm_mode_copy(&crtc_state->hw.mode, 4433 &crtc_state->uapi.mode); 4434 drm_mode_copy(&crtc_state->hw.adjusted_mode, 4435 &crtc_state->uapi.adjusted_mode); 4436 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter; 4437 4438 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 4439 } 4440 4441 static void 4442 copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state, 4443 struct intel_crtc *slave_crtc) 4444 { 4445 struct intel_crtc_state *slave_crtc_state = 4446 intel_atomic_get_new_crtc_state(state, slave_crtc); 4447 struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state); 4448 const struct intel_crtc_state *master_crtc_state = 4449 intel_atomic_get_new_crtc_state(state, master_crtc); 4450 4451 drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut, 4452 master_crtc_state->hw.degamma_lut); 4453 drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut, 4454 master_crtc_state->hw.gamma_lut); 4455 drm_property_replace_blob(&slave_crtc_state->hw.ctm, 4456 master_crtc_state->hw.ctm); 4457 4458 slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed; 4459 } 4460 4461 static int 4462 copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state, 4463 struct intel_crtc *slave_crtc) 4464 { 4465 
struct intel_crtc_state *slave_crtc_state = 4466 intel_atomic_get_new_crtc_state(state, slave_crtc); 4467 struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state); 4468 const struct intel_crtc_state *master_crtc_state = 4469 intel_atomic_get_new_crtc_state(state, master_crtc); 4470 struct intel_crtc_state *saved_state; 4471 4472 WARN_ON(master_crtc_state->bigjoiner_pipes != 4473 slave_crtc_state->bigjoiner_pipes); 4474 4475 saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL); 4476 if (!saved_state) 4477 return -ENOMEM; 4478 4479 /* preserve some things from the slave's original crtc state */ 4480 saved_state->uapi = slave_crtc_state->uapi; 4481 saved_state->scaler_state = slave_crtc_state->scaler_state; 4482 saved_state->shared_dpll = slave_crtc_state->shared_dpll; 4483 saved_state->crc_enabled = slave_crtc_state->crc_enabled; 4484 4485 intel_crtc_free_hw_state(slave_crtc_state); 4486 if (slave_crtc_state->dp_tunnel_ref.tunnel) 4487 drm_dp_tunnel_ref_put(&slave_crtc_state->dp_tunnel_ref); 4488 memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state)); 4489 kfree(saved_state); 4490 4491 /* Re-init hw state */ 4492 memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw)); 4493 slave_crtc_state->hw.enable = master_crtc_state->hw.enable; 4494 slave_crtc_state->hw.active = master_crtc_state->hw.active; 4495 drm_mode_copy(&slave_crtc_state->hw.mode, 4496 &master_crtc_state->hw.mode); 4497 drm_mode_copy(&slave_crtc_state->hw.pipe_mode, 4498 &master_crtc_state->hw.pipe_mode); 4499 drm_mode_copy(&slave_crtc_state->hw.adjusted_mode, 4500 &master_crtc_state->hw.adjusted_mode); 4501 slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter; 4502 4503 if (master_crtc_state->dp_tunnel_ref.tunnel) 4504 drm_dp_tunnel_ref_get(master_crtc_state->dp_tunnel_ref.tunnel, 4505 &slave_crtc_state->dp_tunnel_ref); 4506 4507 copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc); 4508 4509 slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed; 4510 slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed; 4511 slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed; 4512 4513 WARN_ON(master_crtc_state->bigjoiner_pipes != 4514 slave_crtc_state->bigjoiner_pipes); 4515 4516 return 0; 4517 } 4518 4519 static int 4520 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, 4521 struct intel_crtc *crtc) 4522 { 4523 struct intel_crtc_state *crtc_state = 4524 intel_atomic_get_new_crtc_state(state, crtc); 4525 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4526 struct intel_crtc_state *saved_state; 4527 4528 saved_state = intel_crtc_state_alloc(crtc); 4529 if (!saved_state) 4530 return -ENOMEM; 4531 4532 /* free the old crtc_state->hw members */ 4533 intel_crtc_free_hw_state(crtc_state); 4534 4535 intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state); 4536 4537 /* FIXME: before the switch to atomic started, a new pipe_config was
4538 * kzalloc'd. Code that depends on any field being zero should be
4539 * fixed, so that the crtc_state can be safely duplicated. For now,
4540 * only fields that are known to not cause problems are preserved.
*/ 4541 4542 saved_state->uapi = crtc_state->uapi; 4543 saved_state->inherited = crtc_state->inherited; 4544 saved_state->scaler_state = crtc_state->scaler_state; 4545 saved_state->shared_dpll = crtc_state->shared_dpll; 4546 saved_state->dpll_hw_state = crtc_state->dpll_hw_state; 4547 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, 4548 sizeof(saved_state->icl_port_dplls)); 4549 saved_state->crc_enabled = crtc_state->crc_enabled; 4550 if (IS_G4X(dev_priv) || 4551 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4552 saved_state->wm = crtc_state->wm; 4553 4554 memcpy(crtc_state, saved_state, sizeof(*crtc_state)); 4555 kfree(saved_state); 4556 4557 intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc); 4558 4559 return 0; 4560 } 4561 4562 static int 4563 intel_modeset_pipe_config(struct intel_atomic_state *state, 4564 struct intel_crtc *crtc, 4565 const struct intel_link_bw_limits *limits) 4566 { 4567 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4568 struct intel_crtc_state *crtc_state = 4569 intel_atomic_get_new_crtc_state(state, crtc); 4570 struct drm_connector *connector; 4571 struct drm_connector_state *connector_state; 4572 int pipe_src_w, pipe_src_h; 4573 int base_bpp, ret, i; 4574 4575 crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe; 4576 4577 crtc_state->framestart_delay = 1; 4578 4579 /*
4580 * Sanitize sync polarity flags based on requested ones. If neither
4581 * positive nor negative polarity is requested, treat this as meaning
4582 * negative polarity.
4583 */ 4584 if (!(crtc_state->hw.adjusted_mode.flags & 4585 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 4586 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 4587 4588 if (!(crtc_state->hw.adjusted_mode.flags & 4589 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 4590 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 4591 4592 ret = compute_baseline_pipe_bpp(state, crtc); 4593 if (ret) 4594 return ret; 4595 4596 crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe); 4597 crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe]; 4598 4599 if (crtc_state->pipe_bpp > to_bpp_int(crtc_state->max_link_bpp_x16)) { 4600 drm_dbg_kms(&i915->drm, 4601 "[CRTC:%d:%s] Link bpp limited to " BPP_X16_FMT "\n", 4602 crtc->base.base.id, crtc->base.name, 4603 BPP_X16_ARGS(crtc_state->max_link_bpp_x16)); 4604 crtc_state->bw_constrained = true; 4605 } 4606 4607 base_bpp = crtc_state->pipe_bpp; 4608 4609 /*
4610 * Determine the real pipe dimensions. Note that stereo modes can
4611 * increase the actual pipe size due to the frame doubling and
4612 * insertion of additional space for blanks between the frame. This
4613 * is stored in the crtc timings. We use the requested mode to do this
4614 * computation to clearly distinguish it from the adjusted mode, which
4615 * can be changed by the connectors in the below retry loop.
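 * (E.g. HDMI 3D frame packing stacks both eye images plus an active
 * blank between them into one frame, roughly doubling the vertical
 * timings; the CRTC_STEREO_DOUBLE adjustment below accounts for this
 * in the adjusted mode.)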
4616 */ 4617 drm_mode_get_hv_timing(&crtc_state->hw.mode, 4618 &pipe_src_w, &pipe_src_h); 4619 drm_rect_init(&crtc_state->pipe_src, 0, 0, 4620 pipe_src_w, pipe_src_h); 4621 4622 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4623 struct intel_encoder *encoder = 4624 to_intel_encoder(connector_state->best_encoder); 4625 4626 if (connector_state->crtc != &crtc->base) 4627 continue; 4628 4629 if (!check_single_encoder_cloning(state, crtc, encoder)) { 4630 drm_dbg_kms(&i915->drm, 4631 "[ENCODER:%d:%s] rejecting invalid cloning configuration\n", 4632 encoder->base.base.id, encoder->base.name); 4633 return -EINVAL; 4634 } 4635 4636 /*
4637 * Determine output_types before calling the .compute_config()
4638 * hooks so that the hooks can use this information safely.
4639 */ 4640 if (encoder->compute_output_type) 4641 crtc_state->output_types |= 4642 BIT(encoder->compute_output_type(encoder, crtc_state, 4643 connector_state)); 4644 else 4645 crtc_state->output_types |= BIT(encoder->type); 4646 } 4647 4648 /* Ensure the port clock defaults are reset when retrying. */ 4649 crtc_state->port_clock = 0; 4650 crtc_state->pixel_multiplier = 1; 4651 4652 /* Fill in default crtc timings, allow encoders to overwrite them. */ 4653 drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode, 4654 CRTC_STEREO_DOUBLE); 4655 4656 /* Pass our mode to the connectors and the CRTC to give them a chance to
4657 * adjust it according to limitations or connector properties, and also
4658 * a chance to reject the mode entirely.
4659 */ 4660 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4661 struct intel_encoder *encoder = 4662 to_intel_encoder(connector_state->best_encoder); 4663 4664 if (connector_state->crtc != &crtc->base) 4665 continue; 4666 4667 ret = encoder->compute_config(encoder, crtc_state, 4668 connector_state); 4669 if (ret == -EDEADLK) 4670 return ret; 4671 if (ret < 0) { 4672 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n", 4673 encoder->base.base.id, encoder->base.name, ret); 4674 return ret; 4675 } 4676 } 4677 4678 /* Set default port clock if not overwritten by the encoder. Needs to be
4679 * done afterwards in case the encoder adjusts the mode. */ 4680 if (!crtc_state->port_clock) 4681 crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock 4682 * crtc_state->pixel_multiplier; 4683 4684 ret = intel_crtc_compute_config(state, crtc); 4685 if (ret == -EDEADLK) 4686 return ret; 4687 if (ret < 0) { 4688 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n", 4689 crtc->base.base.id, crtc->base.name, ret); 4690 return ret; 4691 } 4692 4693 /* Dithering seems to not pass through bits correctly when it should, so
4694 * only enable it on 6bpc panels and when it's not a compliance
4695 * test requesting a 6bpc video pattern.
4696 */ 4697 crtc_state->dither = (crtc_state->pipe_bpp == 6*3) && 4698 !crtc_state->dither_force_disable; 4699 drm_dbg_kms(&i915->drm, 4700 "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 4701 crtc->base.base.id, crtc->base.name, 4702 base_bpp, crtc_state->pipe_bpp, crtc_state->dither); 4703 4704 return 0; 4705 } 4706 4707 static int 4708 intel_modeset_pipe_config_late(struct intel_atomic_state *state, 4709 struct intel_crtc *crtc) 4710 { 4711 struct intel_crtc_state *crtc_state = 4712 intel_atomic_get_new_crtc_state(state, crtc); 4713 struct drm_connector_state *conn_state; 4714 struct drm_connector *connector; 4715 int i; 4716 4717 intel_bigjoiner_adjust_pipe_src(crtc_state); 4718 4719 for_each_new_connector_in_state(&state->base, connector, 4720 conn_state, i) { 4721 struct intel_encoder *encoder = 4722 to_intel_encoder(conn_state->best_encoder); 4723 int ret; 4724 4725 if (conn_state->crtc != &crtc->base || 4726 !encoder->compute_config_late) 4727 continue; 4728 4729 ret = encoder->compute_config_late(encoder, crtc_state, 4730 conn_state); 4731 if (ret) 4732 return ret; 4733 } 4734 4735 return 0; 4736 } 4737 4738 bool intel_fuzzy_clock_check(int clock1, int clock2) 4739 { 4740 int diff; 4741 4742 if (clock1 == clock2) 4743 return true; 4744 4745 if (!clock1 || !clock2) 4746 return false; 4747 4748 diff = abs(clock1 - clock2); 4749 4750 /* accept a difference of up to 5% of the clocks' sum, i.e. ~10% of their average */
if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105) 4751 return true; 4752 4753 return false; 4754 } 4755 4756 static bool 4757 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 4758 const struct intel_link_m_n *m2_n2) 4759 { 4760 return m_n->tu == m2_n2->tu && 4761 m_n->data_m == m2_n2->data_m && 4762 m_n->data_n == m2_n2->data_n && 4763 m_n->link_m == m2_n2->link_m && 4764 m_n->link_n == m2_n2->link_n; 4765 } 4766 4767 static bool 4768 intel_compare_infoframe(const union hdmi_infoframe *a, 4769 const union hdmi_infoframe *b) 4770 { 4771 return memcmp(a, b, sizeof(*a)) == 0; 4772 } 4773 4774 static bool 4775 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a, 4776 const struct drm_dp_vsc_sdp *b) 4777 { 4778 return a->pixelformat == b->pixelformat && 4779 a->colorimetry == b->colorimetry && 4780 a->bpc == b->bpc && 4781 a->dynamic_range == b->dynamic_range && 4782 a->content_type == b->content_type; 4783 } 4784 4785 static bool 4786 intel_compare_buffer(const u8 *a, const u8 *b, size_t len) 4787 { 4788 return memcmp(a, b, len) == 0; 4789 } 4790 4791 static void 4792 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, 4793 bool fastset, const char *name, 4794 const union hdmi_infoframe *a, 4795 const union hdmi_infoframe *b) 4796 { 4797 if (fastset) { 4798 if (!drm_debug_enabled(DRM_UT_KMS)) 4799 return; 4800 4801 drm_dbg_kms(&dev_priv->drm, 4802 "fastset requirement not met in %s infoframe\n", name); 4803 drm_dbg_kms(&dev_priv->drm, "expected:\n"); 4804 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); 4805 drm_dbg_kms(&dev_priv->drm, "found:\n"); 4806 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); 4807 } else { 4808 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name); 4809 drm_err(&dev_priv->drm, "expected:\n"); 4810 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); 4811 drm_err(&dev_priv->drm, "found:\n"); 4812 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); 4813 } 4814 } 4815 4816 static void 4817 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *i915, 4818 bool fastset, const char *name, 4819 const struct drm_dp_vsc_sdp *a, 4820 const struct drm_dp_vsc_sdp *b)
4821 { 4822 struct drm_printer p; 4823 4824 if (fastset) { 4825 p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL); 4826 4827 drm_printf(&p, "fastset requirement not met in %s dp sdp\n", name); 4828 } else { 4829 p = drm_err_printer(&i915->drm, NULL); 4830 4831 drm_printf(&p, "mismatch in %s dp sdp\n", name); 4832 } 4833 4834 drm_printf(&p, "expected:\n"); 4835 drm_dp_vsc_sdp_log(&p, a); 4836 drm_printf(&p, "found:\n"); 4837 drm_dp_vsc_sdp_log(&p, b); 4838 } 4839 4840 /* Returns the length up to and including the last differing byte */ 4841 static size_t 4842 memcmp_diff_len(const u8 *a, const u8 *b, size_t len) 4843 { 4844 int i; 4845 4846 for (i = len - 1; i >= 0; i--) { 4847 if (a[i] != b[i]) 4848 return i + 1; 4849 } 4850 4851 return 0; 4852 } 4853 4854 static void 4855 pipe_config_buffer_mismatch(bool fastset, const struct intel_crtc *crtc, 4856 const char *name, 4857 const u8 *a, const u8 *b, size_t len) 4858 { 4859 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4860 4861 if (fastset) { 4862 if (!drm_debug_enabled(DRM_UT_KMS)) 4863 return; 4864 4865 /* only dump up to the last difference */ 4866 len = memcmp_diff_len(a, b, len); 4867 4868 drm_dbg_kms(&dev_priv->drm, 4869 "[CRTC:%d:%s] fastset requirement not met in %s buffer\n", 4870 crtc->base.base.id, crtc->base.name, name); 4871 print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE, 4872 16, 0, a, len, false); 4873 print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE, 4874 16, 0, b, len, false); 4875 } else { 4876 /* only dump up to the last difference */ 4877 len = memcmp_diff_len(a, b, len); 4878 4879 drm_err(&dev_priv->drm, "[CRTC:%d:%s] mismatch in %s buffer\n", 4880 crtc->base.base.id, crtc->base.name, name); 4881 print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE, 4882 16, 0, a, len, false); 4883 print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE, 4884 16, 0, b, len, false); 4885 } 4886 } 4887 4888 static void __printf(4, 5) 4889 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, 4890 const char *name, const char *format, ...) 
4891 { 4892 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4893 struct va_format vaf; 4894 va_list args; 4895 4896 va_start(args, format); 4897 vaf.fmt = format; 4898 vaf.va = &args; 4899 4900 if (fastset) 4901 drm_dbg_kms(&i915->drm, 4902 "[CRTC:%d:%s] fastset requirement not met in %s %pV\n", 4903 crtc->base.base.id, crtc->base.name, name, &vaf); 4904 else 4905 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n", 4906 crtc->base.base.id, crtc->base.name, name, &vaf); 4907 4908 va_end(args); 4909 } 4910 4911 static void 4912 pipe_config_pll_mismatch(bool fastset, 4913 const struct intel_crtc *crtc, 4914 const char *name, 4915 const struct intel_dpll_hw_state *a, 4916 const struct intel_dpll_hw_state *b) 4917 { 4918 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4919 4920 if (fastset) { 4921 if (!drm_debug_enabled(DRM_UT_KMS)) 4922 return; 4923 4924 drm_dbg_kms(&i915->drm, 4925 "[CRTC:%d:%s] fastset requirement not met in %s\n", 4926 crtc->base.base.id, crtc->base.name, name); 4927 drm_dbg_kms(&i915->drm, "expected:\n"); 4928 intel_dpll_dump_hw_state(i915, a); 4929 drm_dbg_kms(&i915->drm, "found:\n"); 4930 intel_dpll_dump_hw_state(i915, b); 4931 } else { 4932 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s\n", 4933 crtc->base.base.id, crtc->base.name, name); 4934 drm_err(&i915->drm, "expected:\n"); 4935 intel_dpll_dump_hw_state(i915, a); 4936 drm_err(&i915->drm, "found:\n"); 4937 intel_dpll_dump_hw_state(i915, b); 4938 } 4939 } 4940 4941 bool 4942 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 4943 const struct intel_crtc_state *pipe_config, 4944 bool fastset) 4945 { 4946 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); 4947 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 4948 bool ret = true; 4949 4950 #define PIPE_CONF_CHECK_X(name) do { \ 4951 if (current_config->name != pipe_config->name) { \ 4952 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ 4953 __stringify(name) " is bool"); \ 4954 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 4955 "(expected 0x%08x, found 0x%08x)", \ 4956 current_config->name, \ 4957 pipe_config->name); \ 4958 ret = false; \ 4959 } \ 4960 } while (0) 4961 4962 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \ 4963 if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \ 4964 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ 4965 __stringify(name) " is bool"); \ 4966 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 4967 "(expected 0x%08x, found 0x%08x)", \ 4968 current_config->name & (mask), \ 4969 pipe_config->name & (mask)); \ 4970 ret = false; \ 4971 } \ 4972 } while (0) 4973 4974 #define PIPE_CONF_CHECK_I(name) do { \ 4975 if (current_config->name != pipe_config->name) { \ 4976 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ 4977 __stringify(name) " is bool"); \ 4978 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 4979 "(expected %i, found %i)", \ 4980 current_config->name, \ 4981 pipe_config->name); \ 4982 ret = false; \ 4983 } \ 4984 } while (0) 4985 4986 #define PIPE_CONF_CHECK_BOOL(name) do { \ 4987 if (current_config->name != pipe_config->name) { \ 4988 BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \ 4989 __stringify(name) " is not bool"); \ 4990 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 4991 "(expected %s, found %s)", \ 4992 str_yes_no(current_config->name), \ 4993 str_yes_no(pipe_config->name)); \ 4994 ret = false; \ 4995 } \ 4996 } while (0)
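/*
 * Illustrative use of the checkers above (hypothetical CRTC id/name):
 * a failed PIPE_CONF_CHECK_I(pixel_multiplier) with fastset == false
 * logs via pipe_config_mismatch() something like
 *   [CRTC:80:pipe A] mismatch in pixel_multiplier (expected 1, found 2)
 * and forces intel_pipe_config_compare() to return false.
 */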
4997 4998 #define PIPE_CONF_CHECK_P(name) do { \ 4999 if (current_config->name != pipe_config->name) { \ 5000 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5001 "(expected %p, found %p)", \ 5002 current_config->name, \ 5003 pipe_config->name); \ 5004 ret = false; \ 5005 } \ 5006 } while (0) 5007 5008 #define PIPE_CONF_CHECK_M_N(name) do { \ 5009 if (!intel_compare_link_m_n(&current_config->name, \ 5010 &pipe_config->name)) { \ 5011 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5012 "(expected tu %i data %i/%i link %i/%i, " \ 5013 "found tu %i, data %i/%i link %i/%i)", \ 5014 current_config->name.tu, \ 5015 current_config->name.data_m, \ 5016 current_config->name.data_n, \ 5017 current_config->name.link_m, \ 5018 current_config->name.link_n, \ 5019 pipe_config->name.tu, \ 5020 pipe_config->name.data_m, \ 5021 pipe_config->name.data_n, \ 5022 pipe_config->name.link_m, \ 5023 pipe_config->name.link_n); \ 5024 ret = false; \ 5025 } \ 5026 } while (0) 5027 5028 #define PIPE_CONF_CHECK_PLL(name) do { \ 5029 if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \ 5030 &pipe_config->name)) { \ 5031 pipe_config_pll_mismatch(fastset, crtc, __stringify(name), \ 5032 &current_config->name, \ 5033 &pipe_config->name); \ 5034 ret = false; \ 5035 } \ 5036 } while (0) 5037 5038 #define PIPE_CONF_CHECK_TIMINGS(name) do { \ 5039 PIPE_CONF_CHECK_I(name.crtc_hdisplay); \ 5040 PIPE_CONF_CHECK_I(name.crtc_htotal); \ 5041 PIPE_CONF_CHECK_I(name.crtc_hblank_start); \ 5042 PIPE_CONF_CHECK_I(name.crtc_hblank_end); \ 5043 PIPE_CONF_CHECK_I(name.crtc_hsync_start); \ 5044 PIPE_CONF_CHECK_I(name.crtc_hsync_end); \ 5045 PIPE_CONF_CHECK_I(name.crtc_vdisplay); \ 5046 PIPE_CONF_CHECK_I(name.crtc_vblank_start); \ 5047 PIPE_CONF_CHECK_I(name.crtc_vsync_start); \ 5048 PIPE_CONF_CHECK_I(name.crtc_vsync_end); \ 5049 if (!fastset || !pipe_config->update_lrr) { \ 5050 PIPE_CONF_CHECK_I(name.crtc_vtotal); \ 5051 PIPE_CONF_CHECK_I(name.crtc_vblank_end); \ 5052 } \ 5053 } while (0) 5054 5055 #define PIPE_CONF_CHECK_RECT(name) do { \ 5056 PIPE_CONF_CHECK_I(name.x1); \ 5057 PIPE_CONF_CHECK_I(name.x2); \ 5058 PIPE_CONF_CHECK_I(name.y1); \ 5059 PIPE_CONF_CHECK_I(name.y2); \ 5060 } while (0) 5061 5062 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ 5063 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 5064 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5065 "(%x) (expected %i, found %i)", \ 5066 (mask), \ 5067 current_config->name & (mask), \ 5068 pipe_config->name & (mask)); \ 5069 ret = false; \ 5070 } \ 5071 } while (0) 5072 5073 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ 5074 if (!intel_compare_infoframe(&current_config->infoframes.name, \ 5075 &pipe_config->infoframes.name)) { \ 5076 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \ 5077 &current_config->infoframes.name, \ 5078 &pipe_config->infoframes.name); \ 5079 ret = false; \ 5080 } \ 5081 } while (0) 5082 5083 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \ 5084 if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \ 5085 &pipe_config->infoframes.name)) { \ 5086 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \ 5087 &current_config->infoframes.name, \ 5088 &pipe_config->infoframes.name); \ 5089 ret = false; \ 5090 } \ 5091 } while (0) 5092 5093 #define PIPE_CONF_CHECK_BUFFER(name, len) do { \ 5094 BUILD_BUG_ON(sizeof(current_config->name) != (len)); \ 5095 BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \ 5096 if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \ 5097
pipe_config_buffer_mismatch(fastset, crtc, __stringify(name), \ 5098 current_config->name, \ 5099 pipe_config->name, \ 5100 (len)); \ 5101 ret = false; \ 5102 } \ 5103 } while (0) 5104 5105 #define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \ 5106 if (current_config->gamma_mode == pipe_config->gamma_mode && \ 5107 !intel_color_lut_equal(current_config, \ 5108 current_config->lut, pipe_config->lut, \ 5109 is_pre_csc_lut)) { \ 5110 pipe_config_mismatch(fastset, crtc, __stringify(lut), \ 5111 "hw_state doesn't match sw_state"); \ 5112 ret = false; \ 5113 } \ 5114 } while (0) 5115 5116 #define PIPE_CONF_CHECK_CSC(name) do { \ 5117 PIPE_CONF_CHECK_X(name.preoff[0]); \ 5118 PIPE_CONF_CHECK_X(name.preoff[1]); \ 5119 PIPE_CONF_CHECK_X(name.preoff[2]); \ 5120 PIPE_CONF_CHECK_X(name.coeff[0]); \ 5121 PIPE_CONF_CHECK_X(name.coeff[1]); \ 5122 PIPE_CONF_CHECK_X(name.coeff[2]); \ 5123 PIPE_CONF_CHECK_X(name.coeff[3]); \ 5124 PIPE_CONF_CHECK_X(name.coeff[4]); \ 5125 PIPE_CONF_CHECK_X(name.coeff[5]); \ 5126 PIPE_CONF_CHECK_X(name.coeff[6]); \ 5127 PIPE_CONF_CHECK_X(name.coeff[7]); \ 5128 PIPE_CONF_CHECK_X(name.coeff[8]); \ 5129 PIPE_CONF_CHECK_X(name.postoff[0]); \ 5130 PIPE_CONF_CHECK_X(name.postoff[1]); \ 5131 PIPE_CONF_CHECK_X(name.postoff[2]); \ 5132 } while (0) 5133 5134 #define PIPE_CONF_QUIRK(quirk) \ 5135 ((current_config->quirks | pipe_config->quirks) & (quirk)) 5136 5137 PIPE_CONF_CHECK_BOOL(hw.enable); 5138 PIPE_CONF_CHECK_BOOL(hw.active); 5139 5140 PIPE_CONF_CHECK_I(cpu_transcoder); 5141 PIPE_CONF_CHECK_I(mst_master_transcoder); 5142 5143 PIPE_CONF_CHECK_BOOL(has_pch_encoder); 5144 PIPE_CONF_CHECK_I(fdi_lanes); 5145 PIPE_CONF_CHECK_M_N(fdi_m_n); 5146 5147 PIPE_CONF_CHECK_I(lane_count); 5148 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 5149 5150 if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) { 5151 if (!fastset || !pipe_config->update_m_n) 5152 PIPE_CONF_CHECK_M_N(dp_m_n); 5153 } else { 5154 PIPE_CONF_CHECK_M_N(dp_m_n); 5155 PIPE_CONF_CHECK_M_N(dp_m2_n2); 5156 } 5157 5158 PIPE_CONF_CHECK_X(output_types); 5159 5160 PIPE_CONF_CHECK_I(framestart_delay); 5161 PIPE_CONF_CHECK_I(msa_timing_delay); 5162 5163 PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode); 5164 PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode); 5165 5166 PIPE_CONF_CHECK_I(pixel_multiplier); 5167 5168 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5169 DRM_MODE_FLAG_INTERLACE); 5170 5171 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 5172 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5173 DRM_MODE_FLAG_PHSYNC); 5174 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5175 DRM_MODE_FLAG_NHSYNC); 5176 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5177 DRM_MODE_FLAG_PVSYNC); 5178 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5179 DRM_MODE_FLAG_NVSYNC); 5180 } 5181 5182 PIPE_CONF_CHECK_I(output_format); 5183 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 5184 if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 5185 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 5186 PIPE_CONF_CHECK_BOOL(limited_color_range); 5187 5188 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 5189 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 5190 PIPE_CONF_CHECK_BOOL(has_infoframe); 5191 PIPE_CONF_CHECK_BOOL(enhanced_framing); 5192 PIPE_CONF_CHECK_BOOL(fec_enable); 5193 5194 if (!fastset) { 5195 PIPE_CONF_CHECK_BOOL(has_audio); 5196 PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES); 5197 } 5198 5199 PIPE_CONF_CHECK_X(gmch_pfit.control); 5200 /* pfit ratios are autocomputed by the hw on gen4+ */ 5201 if (DISPLAY_VER(dev_priv) < 4) 5202 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 5203 
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 5204 5205 /*
5206 * Changing the EDP transcoder input mux
5207 * (A_ONOFF vs. A_ON) requires a full modeset.
5208 */ 5209 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 5210 5211 if (!fastset) { 5212 PIPE_CONF_CHECK_RECT(pipe_src); 5213 5214 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 5215 PIPE_CONF_CHECK_RECT(pch_pfit.dst); 5216 5217 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 5218 PIPE_CONF_CHECK_I(pixel_rate); 5219 5220 PIPE_CONF_CHECK_X(gamma_mode); 5221 if (IS_CHERRYVIEW(dev_priv)) 5222 PIPE_CONF_CHECK_X(cgm_mode); 5223 else 5224 PIPE_CONF_CHECK_X(csc_mode); 5225 PIPE_CONF_CHECK_BOOL(gamma_enable); 5226 PIPE_CONF_CHECK_BOOL(csc_enable); 5227 PIPE_CONF_CHECK_BOOL(wgc_enable); 5228 5229 PIPE_CONF_CHECK_I(linetime); 5230 PIPE_CONF_CHECK_I(ips_linetime); 5231 5232 PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true); 5233 PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false); 5234 5235 PIPE_CONF_CHECK_CSC(csc); 5236 PIPE_CONF_CHECK_CSC(output_csc); 5237 } 5238 5239 PIPE_CONF_CHECK_BOOL(double_wide); 5240 5241 if (dev_priv->display.dpll.mgr) 5242 PIPE_CONF_CHECK_P(shared_dpll); 5243 5244 /* FIXME convert everything over to the dpll_mgr */ 5245 if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv)) 5246 PIPE_CONF_CHECK_PLL(dpll_hw_state); 5247 5248 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 5249 PIPE_CONF_CHECK_X(dsi_pll.div); 5250 5251 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) 5252 PIPE_CONF_CHECK_I(pipe_bpp); 5253 5254 if (!fastset || !pipe_config->update_m_n) { 5255 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock); 5256 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock); 5257 } 5258 PIPE_CONF_CHECK_I(port_clock); 5259 5260 PIPE_CONF_CHECK_I(min_voltage_level); 5261 5262 if (current_config->has_psr || pipe_config->has_psr) 5263 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, 5264 ~intel_hdmi_infoframe_enable(DP_SDP_VSC)); 5265 else 5266 PIPE_CONF_CHECK_X(infoframes.enable); 5267 5268 PIPE_CONF_CHECK_X(infoframes.gcp); 5269 PIPE_CONF_CHECK_INFOFRAME(avi); 5270 PIPE_CONF_CHECK_INFOFRAME(spd); 5271 PIPE_CONF_CHECK_INFOFRAME(hdmi); 5272 PIPE_CONF_CHECK_INFOFRAME(drm); 5273 PIPE_CONF_CHECK_DP_VSC_SDP(vsc); 5274 5275 PIPE_CONF_CHECK_X(sync_mode_slaves_mask); 5276 PIPE_CONF_CHECK_I(master_transcoder); 5277 PIPE_CONF_CHECK_X(bigjoiner_pipes); 5278 5279 PIPE_CONF_CHECK_BOOL(dsc.config.block_pred_enable); 5280 PIPE_CONF_CHECK_BOOL(dsc.config.convert_rgb); 5281 PIPE_CONF_CHECK_BOOL(dsc.config.simple_422); 5282 PIPE_CONF_CHECK_BOOL(dsc.config.native_422); 5283 PIPE_CONF_CHECK_BOOL(dsc.config.native_420); 5284 PIPE_CONF_CHECK_BOOL(dsc.config.vbr_enable); 5285 PIPE_CONF_CHECK_I(dsc.config.line_buf_depth); 5286 PIPE_CONF_CHECK_I(dsc.config.bits_per_component); 5287 PIPE_CONF_CHECK_I(dsc.config.pic_width); 5288 PIPE_CONF_CHECK_I(dsc.config.pic_height); 5289 PIPE_CONF_CHECK_I(dsc.config.slice_width); 5290 PIPE_CONF_CHECK_I(dsc.config.slice_height); 5291 PIPE_CONF_CHECK_I(dsc.config.initial_dec_delay); 5292 PIPE_CONF_CHECK_I(dsc.config.initial_xmit_delay); 5293 PIPE_CONF_CHECK_I(dsc.config.scale_decrement_interval); 5294 PIPE_CONF_CHECK_I(dsc.config.scale_increment_interval); 5295 PIPE_CONF_CHECK_I(dsc.config.initial_scale_value); 5296 PIPE_CONF_CHECK_I(dsc.config.first_line_bpg_offset); 5297 PIPE_CONF_CHECK_I(dsc.config.flatness_min_qp); 5298 PIPE_CONF_CHECK_I(dsc.config.flatness_max_qp); 5299 PIPE_CONF_CHECK_I(dsc.config.slice_bpg_offset); 5300 PIPE_CONF_CHECK_I(dsc.config.nfl_bpg_offset); 5301 PIPE_CONF_CHECK_I(dsc.config.initial_offset); 5302 PIPE_CONF_CHECK_I(dsc.config.final_offset); 5303
PIPE_CONF_CHECK_I(dsc.config.rc_model_size); 5304 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit0); 5305 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit1); 5306 PIPE_CONF_CHECK_I(dsc.config.slice_chunk_size); 5307 PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset); 5308 PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset); 5309 5310 PIPE_CONF_CHECK_BOOL(dsc.compression_enable); 5311 PIPE_CONF_CHECK_BOOL(dsc.dsc_split); 5312 PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16); 5313 5314 PIPE_CONF_CHECK_BOOL(splitter.enable); 5315 PIPE_CONF_CHECK_I(splitter.link_count); 5316 PIPE_CONF_CHECK_I(splitter.pixel_overlap); 5317 5318 if (!fastset) { 5319 PIPE_CONF_CHECK_BOOL(vrr.enable); 5320 PIPE_CONF_CHECK_I(vrr.vmin); 5321 PIPE_CONF_CHECK_I(vrr.vmax); 5322 PIPE_CONF_CHECK_I(vrr.flipline); 5323 PIPE_CONF_CHECK_I(vrr.pipeline_full); 5324 PIPE_CONF_CHECK_I(vrr.guardband); 5325 } 5326 5327 #undef PIPE_CONF_CHECK_X 5328 #undef PIPE_CONF_CHECK_I 5329 #undef PIPE_CONF_CHECK_BOOL 5330 #undef PIPE_CONF_CHECK_P 5331 #undef PIPE_CONF_CHECK_FLAGS 5332 #undef PIPE_CONF_CHECK_COLOR_LUT 5333 #undef PIPE_CONF_CHECK_TIMINGS 5334 #undef PIPE_CONF_CHECK_RECT 5335 #undef PIPE_CONF_QUIRK 5336 5337 return ret; 5338 } 5339 5340 static void 5341 intel_verify_planes(struct intel_atomic_state *state) 5342 { 5343 struct intel_plane *plane; 5344 const struct intel_plane_state *plane_state; 5345 int i; 5346 5347 for_each_new_intel_plane_in_state(state, plane, 5348 plane_state, i) 5349 assert_plane(plane, plane_state->planar_slave || 5350 plane_state->uapi.visible); 5351 } 5352 5353 static int intel_modeset_pipe(struct intel_atomic_state *state, 5354 struct intel_crtc_state *crtc_state, 5355 const char *reason) 5356 { 5357 struct drm_i915_private *i915 = to_i915(state->base.dev); 5358 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5359 int ret; 5360 5361 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Full modeset due to %s\n", 5362 crtc->base.base.id, crtc->base.name, reason); 5363 5364 ret = drm_atomic_add_affected_connectors(&state->base, 5365 &crtc->base); 5366 if (ret) 5367 return ret; 5368 5369 ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc); 5370 if (ret) 5371 return ret; 5372 5373 ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc); 5374 if (ret) 5375 return ret; 5376 5377 ret = intel_atomic_add_affected_planes(state, crtc); 5378 if (ret) 5379 return ret; 5380 5381 crtc_state->uapi.mode_changed = true; 5382 5383 return 0; 5384 } 5385 5386 /** 5387 * intel_modeset_pipes_in_mask_early - force a full modeset on a set of pipes 5388 * @state: intel atomic state 5389 * @reason: the reason for the full modeset 5390 * @mask: mask of pipes to modeset 5391 * 5392 * Add pipes in @mask to @state and force a full modeset on the enabled ones 5393 * due to the description in @reason. 5394 * This function can be called only before new plane states are computed. 5395 * 5396 * Returns 0 in case of success, negative error code otherwise. 
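 *
 * An illustrative call from a global check might look like this (the
 * reason string and pipe mask here are made-up examples):
 *
 *	ret = intel_modeset_pipes_in_mask_early(state, "CDCLK change",
 *						BIT(PIPE_A) | BIT(PIPE_B));
 *	if (ret)
 *		return ret;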
5397 */
5398 int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
5399 const char *reason, u8 mask)
5400 {
5401 struct drm_i915_private *i915 = to_i915(state->base.dev);
5402 struct intel_crtc *crtc;
5403
5404 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mask) {
5405 struct intel_crtc_state *crtc_state;
5406 int ret;
5407
5408 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5409 if (IS_ERR(crtc_state))
5410 return PTR_ERR(crtc_state);
5411
5412 if (!crtc_state->hw.enable ||
5413 intel_crtc_needs_modeset(crtc_state))
5414 continue;
5415
5416 ret = intel_modeset_pipe(state, crtc_state, reason);
5417 if (ret)
5418 return ret;
5419 }
5420
5421 return 0;
5422 }
5423
5424 static void
5425 intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
5426 {
5427 crtc_state->uapi.mode_changed = true;
5428
5429 crtc_state->update_pipe = false;
5430 crtc_state->update_m_n = false;
5431 crtc_state->update_lrr = false;
5432 }
5433
5434 /**
5435 * intel_modeset_all_pipes_late - force a full modeset on all pipes
5436 * @state: intel atomic state
5437 * @reason: the reason for the full modeset
5438 *
5439 * Add all pipes to @state and force a full modeset on the active ones due to
5440 * the description in @reason.
5441 * This function can be called only after new plane states are computed already.
5442 *
5443 * Returns 0 in case of success, negative error code otherwise.
5444 */
5445 int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
5446 const char *reason)
5447 {
5448 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5449 struct intel_crtc *crtc;
5450
5451 for_each_intel_crtc(&dev_priv->drm, crtc) {
5452 struct intel_crtc_state *crtc_state;
5453 int ret;
5454
5455 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5456 if (IS_ERR(crtc_state))
5457 return PTR_ERR(crtc_state);
5458
5459 if (!crtc_state->hw.active ||
5460 intel_crtc_needs_modeset(crtc_state))
5461 continue;
5462
5463 ret = intel_modeset_pipe(state, crtc_state, reason);
5464 if (ret)
5465 return ret;
5466
5467 intel_crtc_flag_modeset(crtc_state);
5468
5469 crtc_state->update_planes |= crtc_state->active_planes;
5470 crtc_state->async_flip_planes = 0;
5471 crtc_state->do_async_flip = false;
5472 }
5473
5474 return 0;
5475 }
5476
5477 /*
5478 * This implements the workaround described in the "notes" section of the mode
5479 * set sequence documentation. When going from no pipes or a single pipe to
5480 * multiple pipes, and planes are enabled after the pipe, we need to wait at
5481 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
5482 */
5483 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
5484 {
5485 struct intel_crtc_state *crtc_state;
5486 struct intel_crtc *crtc;
5487 struct intel_crtc_state *first_crtc_state = NULL;
5488 struct intel_crtc_state *other_crtc_state = NULL;
5489 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
5490 int i;
5491
5492 /* look at all crtc's that are going to be enabled during the modeset */
5493 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5494 if (!crtc_state->hw.active ||
5495 !intel_crtc_needs_modeset(crtc_state))
5496 continue;
5497
5498 if (first_crtc_state) {
5499 other_crtc_state = crtc_state;
5500 break;
5501 } else {
5502 first_crtc_state = crtc_state;
5503 first_pipe = crtc->pipe;
5504 }
5505 }
5506
5507 /* No workaround needed? 
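(i.e. this modeset enables no pipe at all, so there is no plane enabling to order against the first pipe) 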
*/ 5508 if (!first_crtc_state) 5509 return 0; 5510 5511 /* w/a possibly needed, check how many crtc's are already enabled. */ 5512 for_each_intel_crtc(state->base.dev, crtc) { 5513 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 5514 if (IS_ERR(crtc_state)) 5515 return PTR_ERR(crtc_state); 5516 5517 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 5518 5519 if (!crtc_state->hw.active || 5520 intel_crtc_needs_modeset(crtc_state)) 5521 continue; 5522 5523 /* 2 or more enabled crtcs means no need for w/a */ 5524 if (enabled_pipe != INVALID_PIPE) 5525 return 0; 5526 5527 enabled_pipe = crtc->pipe; 5528 } 5529 5530 if (enabled_pipe != INVALID_PIPE) 5531 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 5532 else if (other_crtc_state) 5533 other_crtc_state->hsw_workaround_pipe = first_pipe; 5534 5535 return 0; 5536 } 5537 5538 u8 intel_calc_active_pipes(struct intel_atomic_state *state, 5539 u8 active_pipes) 5540 { 5541 const struct intel_crtc_state *crtc_state; 5542 struct intel_crtc *crtc; 5543 int i; 5544 5545 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5546 if (crtc_state->hw.active) 5547 active_pipes |= BIT(crtc->pipe); 5548 else 5549 active_pipes &= ~BIT(crtc->pipe); 5550 } 5551 5552 return active_pipes; 5553 } 5554 5555 static int intel_modeset_checks(struct intel_atomic_state *state) 5556 { 5557 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5558 5559 state->modeset = true; 5560 5561 if (IS_HASWELL(dev_priv)) 5562 return hsw_mode_set_planes_workaround(state); 5563 5564 return 0; 5565 } 5566 5567 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 5568 struct intel_crtc_state *new_crtc_state) 5569 { 5570 struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev); 5571 5572 /* only allow LRR when the timings stay within the VRR range */ 5573 if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range) 5574 new_crtc_state->update_lrr = false; 5575 5576 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 5577 drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n"); 5578 else 5579 new_crtc_state->uapi.mode_changed = false; 5580 5581 if (intel_compare_link_m_n(&old_crtc_state->dp_m_n, 5582 &new_crtc_state->dp_m_n)) 5583 new_crtc_state->update_m_n = false; 5584 5585 if ((old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal && 5586 old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end)) 5587 new_crtc_state->update_lrr = false; 5588 5589 if (intel_crtc_needs_modeset(new_crtc_state)) 5590 intel_crtc_flag_modeset(new_crtc_state); 5591 else 5592 new_crtc_state->update_pipe = true; 5593 } 5594 5595 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, 5596 struct intel_crtc *crtc, 5597 u8 plane_ids_mask) 5598 { 5599 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5600 struct intel_plane *plane; 5601 5602 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 5603 struct intel_plane_state *plane_state; 5604 5605 if ((plane_ids_mask & BIT(plane->id)) == 0) 5606 continue; 5607 5608 plane_state = intel_atomic_get_plane_state(state, plane); 5609 if (IS_ERR(plane_state)) 5610 return PTR_ERR(plane_state); 5611 } 5612 5613 return 0; 5614 } 5615 5616 int intel_atomic_add_affected_planes(struct intel_atomic_state *state, 5617 struct intel_crtc *crtc) 5618 { 5619 const struct intel_crtc_state *old_crtc_state = 5620 
intel_atomic_get_old_crtc_state(state, crtc); 5621 const struct intel_crtc_state *new_crtc_state = 5622 intel_atomic_get_new_crtc_state(state, crtc); 5623 5624 return intel_crtc_add_planes_to_state(state, crtc, 5625 old_crtc_state->enabled_planes | 5626 new_crtc_state->enabled_planes); 5627 } 5628 5629 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) 5630 { 5631 /* See {hsw,vlv,ivb}_plane_ratio() */ 5632 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || 5633 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || 5634 IS_IVYBRIDGE(dev_priv); 5635 } 5636 5637 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state, 5638 struct intel_crtc *crtc, 5639 struct intel_crtc *other) 5640 { 5641 const struct intel_plane_state __maybe_unused *plane_state; 5642 struct intel_plane *plane; 5643 u8 plane_ids = 0; 5644 int i; 5645 5646 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 5647 if (plane->pipe == crtc->pipe) 5648 plane_ids |= BIT(plane->id); 5649 } 5650 5651 return intel_crtc_add_planes_to_state(state, other, plane_ids); 5652 } 5653 5654 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state) 5655 { 5656 struct drm_i915_private *i915 = to_i915(state->base.dev); 5657 const struct intel_crtc_state *crtc_state; 5658 struct intel_crtc *crtc; 5659 int i; 5660 5661 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5662 struct intel_crtc *other; 5663 5664 for_each_intel_crtc_in_pipe_mask(&i915->drm, other, 5665 crtc_state->bigjoiner_pipes) { 5666 int ret; 5667 5668 if (crtc == other) 5669 continue; 5670 5671 ret = intel_crtc_add_bigjoiner_planes(state, crtc, other); 5672 if (ret) 5673 return ret; 5674 } 5675 } 5676 5677 return 0; 5678 } 5679 5680 static int intel_atomic_check_planes(struct intel_atomic_state *state) 5681 { 5682 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5683 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 5684 struct intel_plane_state __maybe_unused *plane_state; 5685 struct intel_plane *plane; 5686 struct intel_crtc *crtc; 5687 int i, ret; 5688 5689 ret = icl_add_linked_planes(state); 5690 if (ret) 5691 return ret; 5692 5693 ret = intel_bigjoiner_add_affected_planes(state); 5694 if (ret) 5695 return ret; 5696 5697 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 5698 ret = intel_plane_atomic_check(state, plane); 5699 if (ret) { 5700 drm_dbg_atomic(&dev_priv->drm, 5701 "[PLANE:%d:%s] atomic driver check failed\n", 5702 plane->base.base.id, plane->base.name); 5703 return ret; 5704 } 5705 } 5706 5707 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 5708 new_crtc_state, i) { 5709 u8 old_active_planes, new_active_planes; 5710 5711 ret = icl_check_nv12_planes(new_crtc_state); 5712 if (ret) 5713 return ret; 5714 5715 /* 5716 * On some platforms the number of active planes affects 5717 * the planes' minimum cdclk calculation. Add such planes 5718 * to the state before we compute the minimum cdclk. 
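 * See the {hsw,vlv,ivb}_plane_ratio() helpers referenced in
 * active_planes_affects_min_cdclk() above; only a change in the number
 * of non-cursor planes matters, hence the hweight8() comparison below.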
5719 */ 5720 if (!active_planes_affects_min_cdclk(dev_priv)) 5721 continue; 5722 5723 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 5724 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 5725 5726 if (hweight8(old_active_planes) == hweight8(new_active_planes)) 5727 continue; 5728 5729 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); 5730 if (ret) 5731 return ret; 5732 } 5733 5734 return 0; 5735 } 5736 5737 static int intel_atomic_check_crtcs(struct intel_atomic_state *state) 5738 { 5739 struct intel_crtc_state __maybe_unused *crtc_state; 5740 struct intel_crtc *crtc; 5741 int i; 5742 5743 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5744 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 5745 int ret; 5746 5747 ret = intel_crtc_atomic_check(state, crtc); 5748 if (ret) { 5749 drm_dbg_atomic(&i915->drm, 5750 "[CRTC:%d:%s] atomic driver check failed\n", 5751 crtc->base.base.id, crtc->base.name); 5752 return ret; 5753 } 5754 } 5755 5756 return 0; 5757 } 5758 5759 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, 5760 u8 transcoders) 5761 { 5762 const struct intel_crtc_state *new_crtc_state; 5763 struct intel_crtc *crtc; 5764 int i; 5765 5766 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 5767 if (new_crtc_state->hw.enable && 5768 transcoders & BIT(new_crtc_state->cpu_transcoder) && 5769 intel_crtc_needs_modeset(new_crtc_state)) 5770 return true; 5771 } 5772 5773 return false; 5774 } 5775 5776 static bool intel_pipes_need_modeset(struct intel_atomic_state *state, 5777 u8 pipes) 5778 { 5779 const struct intel_crtc_state *new_crtc_state; 5780 struct intel_crtc *crtc; 5781 int i; 5782 5783 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 5784 if (new_crtc_state->hw.enable && 5785 pipes & BIT(crtc->pipe) && 5786 intel_crtc_needs_modeset(new_crtc_state)) 5787 return true; 5788 } 5789 5790 return false; 5791 } 5792 5793 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, 5794 struct intel_crtc *master_crtc) 5795 { 5796 struct drm_i915_private *i915 = to_i915(state->base.dev); 5797 struct intel_crtc_state *master_crtc_state = 5798 intel_atomic_get_new_crtc_state(state, master_crtc); 5799 struct intel_crtc *slave_crtc; 5800 5801 if (!master_crtc_state->bigjoiner_pipes) 5802 return 0; 5803 5804 /* sanity check */ 5805 if (drm_WARN_ON(&i915->drm, 5806 master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state))) 5807 return -EINVAL; 5808 5809 if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) { 5810 drm_dbg_kms(&i915->drm, 5811 "[CRTC:%d:%s] Cannot act as big joiner master " 5812 "(need 0x%x as pipes, only 0x%x possible)\n", 5813 master_crtc->base.base.id, master_crtc->base.name, 5814 master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915)); 5815 return -EINVAL; 5816 } 5817 5818 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, 5819 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) { 5820 struct intel_crtc_state *slave_crtc_state; 5821 int ret; 5822 5823 slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc); 5824 if (IS_ERR(slave_crtc_state)) 5825 return PTR_ERR(slave_crtc_state); 5826 5827 /* master being enabled, slave was already configured? 
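(i.e. userspace is still driving the slave pipe as a normal CRTC; the uapi.enable check below catches exactly that) 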
*/
5828 if (slave_crtc_state->uapi.enable) {
5829 drm_dbg_kms(&i915->drm,
5830 "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
5831 "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
5832 slave_crtc->base.base.id, slave_crtc->base.name,
5833 master_crtc->base.base.id, master_crtc->base.name);
5834 return -EINVAL;
5835 }
5836
5837 /*
5838 * The state copy logic assumes the master crtc gets processed
5839 * before the slave crtc during the main compute_config loop.
5840 * This works because the crtcs are created in pipe order,
5841 * and the hardware requires master pipe < slave pipe as well.
5842 * Should that change we need to rethink the logic.
5843 */
5844 if (WARN_ON(drm_crtc_index(&master_crtc->base) >
5845 drm_crtc_index(&slave_crtc->base)))
5846 return -EINVAL;
5847
5848 drm_dbg_kms(&i915->drm,
5849 "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
5850 slave_crtc->base.base.id, slave_crtc->base.name,
5851 master_crtc->base.base.id, master_crtc->base.name);
5852
5853 slave_crtc_state->bigjoiner_pipes =
5854 master_crtc_state->bigjoiner_pipes;
5855
5856 ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
5857 if (ret)
5858 return ret;
5859 }
5860
5861 return 0;
5862 }
5863
5864 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
5865 struct intel_crtc *master_crtc)
5866 {
5867 struct drm_i915_private *i915 = to_i915(state->base.dev);
5868 struct intel_crtc_state *master_crtc_state =
5869 intel_atomic_get_new_crtc_state(state, master_crtc);
5870 struct intel_crtc *slave_crtc;
5871
5872 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
5873 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
5874 struct intel_crtc_state *slave_crtc_state =
5875 intel_atomic_get_new_crtc_state(state, slave_crtc);
5876
5877 slave_crtc_state->bigjoiner_pipes = 0;
5878
5879 intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
5880 }
5881
5882 master_crtc_state->bigjoiner_pipes = 0;
5883 }
5884
5885 /**
5886 * DOC: asynchronous flip implementation
5887 *
5888 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
5889 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
5890 * Correspondingly, support is currently added for the primary plane only.
5891 *
5892 * Async flip can only change the plane surface address, so anything else
5893 * changing is rejected from the intel_async_flip_check_hw() function.
5894 * Once this check is cleared, the flip done interrupt is enabled using
5895 * the intel_crtc_enable_flip_done() function.
5896 *
5897 * As soon as the surface address register is written, the flip done interrupt is
5898 * generated and the requested events are sent to userspace in the interrupt
5899 * handler itself. The timestamp and sequence sent during the flip done event
5900 * correspond to the last vblank and have no relation to the actual time when
5901 * the flip done event was sent. 
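 *
 * For context, userspace typically requests an async flip roughly like
 * this (an illustrative sketch; fd, crtc_id, fb_id and user_data are
 * assumed to be set up elsewhere):
 *
 *	drmModePageFlip(fd, crtc_id, fb_id,
 *			DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
 *			user_data);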
5902 */
5903 static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
5904 struct intel_crtc *crtc)
5905 {
5906 struct drm_i915_private *i915 = to_i915(state->base.dev);
5907 const struct intel_crtc_state *new_crtc_state =
5908 intel_atomic_get_new_crtc_state(state, crtc);
5909 const struct intel_plane_state *old_plane_state;
5910 struct intel_plane_state *new_plane_state;
5911 struct intel_plane *plane;
5912 int i;
5913
5914 if (!new_crtc_state->uapi.async_flip)
5915 return 0;
5916
5917 if (!new_crtc_state->uapi.active) {
5918 drm_dbg_kms(&i915->drm,
5919 "[CRTC:%d:%s] not active\n",
5920 crtc->base.base.id, crtc->base.name);
5921 return -EINVAL;
5922 }
5923
5924 if (intel_crtc_needs_modeset(new_crtc_state)) {
5925 drm_dbg_kms(&i915->drm,
5926 "[CRTC:%d:%s] modeset required\n",
5927 crtc->base.base.id, crtc->base.name);
5928 return -EINVAL;
5929 }
5930
5931 /*
5932 * FIXME: Bigjoiner+async flip is busted currently.
5933 * Remove this check once the issues are fixed.
5934 */
5935 if (new_crtc_state->bigjoiner_pipes) {
5936 drm_dbg_kms(&i915->drm,
5937 "[CRTC:%d:%s] async flip disallowed with bigjoiner\n",
5938 crtc->base.base.id, crtc->base.name);
5939 return -EINVAL;
5940 }
5941
5942 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
5943 new_plane_state, i) {
5944 if (plane->pipe != crtc->pipe)
5945 continue;
5946
5947 /*
5948 * TODO: Async flip is only supported through the page flip IOCTL
5949 * as of now, so support is currently added for the primary plane only.
5950 * Support for other planes on platforms that support it
5951 * (vlv/chv and icl+) should be added when async flip is
5952 * enabled in the atomic IOCTL path.
5953 */
5954 if (!plane->async_flip) {
5955 drm_dbg_kms(&i915->drm,
5956 "[PLANE:%d:%s] async flip not supported\n",
5957 plane->base.base.id, plane->base.name);
5958 return -EINVAL;
5959 }
5960
5961 if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
5962 drm_dbg_kms(&i915->drm,
5963 "[PLANE:%d:%s] no old or new framebuffer\n",
5964 plane->base.base.id, plane->base.name);
5965 return -EINVAL;
5966 }
5967 }
5968
5969 return 0;
5970 }
5971
5972 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
5973 {
5974 struct drm_i915_private *i915 = to_i915(state->base.dev);
5975 const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
5976 const struct intel_plane_state *new_plane_state, *old_plane_state;
5977 struct intel_plane *plane;
5978 int i;
5979
5980 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
5981 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
5982
5983 if (!new_crtc_state->uapi.async_flip)
5984 return 0;
5985
5986 if (!new_crtc_state->hw.active) {
5987 drm_dbg_kms(&i915->drm,
5988 "[CRTC:%d:%s] not active\n",
5989 crtc->base.base.id, crtc->base.name);
5990 return -EINVAL;
5991 }
5992
5993 if (intel_crtc_needs_modeset(new_crtc_state)) {
5994 drm_dbg_kms(&i915->drm,
5995 "[CRTC:%d:%s] modeset required\n",
5996 crtc->base.base.id, crtc->base.name);
5997 return -EINVAL;
5998 }
5999
6000 if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
6001 drm_dbg_kms(&i915->drm,
6002 "[CRTC:%d:%s] Active planes cannot be changed in async flip\n",
6003 crtc->base.base.id, crtc->base.name);
6004 return -EINVAL;
6005 }
6006
6007 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
6008 new_plane_state, i) {
6009 if (plane->pipe != crtc->pipe)
6010 continue;
6011
6012 /*
6013 * Only async flip capable planes should be in the state
6014 * 
if we're really about to ask the hardware to perform
6015 * an async flip. We should never get this far otherwise.
6016 */
6017 if (drm_WARN_ON(&i915->drm,
6018 new_crtc_state->do_async_flip && !plane->async_flip))
6019 return -EINVAL;
6020
6021 /*
6022 * Only check async flip capable planes. Other planes
6023 * may be involved in the initial commit due to
6024 * the wm0/ddb optimization.
6025 *
6026 * TODO: maybe we should track which planes actually
6027 * were requested to do the async flip...
6028 */
6029 if (!plane->async_flip)
6030 continue;
6031
6032 /*
6033 * FIXME: This check is kept generic for all platforms.
6034 * Need to verify this for all gen9 platforms to enable
6035 * this selectively if required.
6036 */
6037 switch (new_plane_state->hw.fb->modifier) {
6038 case DRM_FORMAT_MOD_LINEAR:
6039 /*
6040 * FIXME: Async flip on linear buffers is supported on ICL,
6041 * but additional alignment and fbc restrictions
6042 * need to be taken care of. These aren't applicable for
6043 * gen12+.
6044 */
6045 if (DISPLAY_VER(i915) < 12) {
6046 drm_dbg_kms(&i915->drm,
6047 "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n",
6048 plane->base.base.id, plane->base.name,
6049 new_plane_state->hw.fb->modifier, DISPLAY_VER(i915));
6050 return -EINVAL;
6051 }
6052 break;
6053
6054 case I915_FORMAT_MOD_X_TILED:
6055 case I915_FORMAT_MOD_Y_TILED:
6056 case I915_FORMAT_MOD_Yf_TILED:
6057 case I915_FORMAT_MOD_4_TILED:
6058 break;
6059 default:
6060 drm_dbg_kms(&i915->drm,
6061 "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
6062 plane->base.base.id, plane->base.name,
6063 new_plane_state->hw.fb->modifier);
6064 return -EINVAL;
6065 }
6066
6067 if (new_plane_state->hw.fb->format->num_planes > 1) {
6068 drm_dbg_kms(&i915->drm,
6069 "[PLANE:%d:%s] Planar formats do not support async flips\n",
6070 plane->base.base.id, plane->base.name);
6071 return -EINVAL;
6072 }
6073
6074 if (old_plane_state->view.color_plane[0].mapping_stride !=
6075 new_plane_state->view.color_plane[0].mapping_stride) {
6076 drm_dbg_kms(&i915->drm,
6077 "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
6078 plane->base.base.id, plane->base.name);
6079 return -EINVAL;
6080 }
6081
6082 if (old_plane_state->hw.fb->modifier !=
6083 new_plane_state->hw.fb->modifier) {
6084 drm_dbg_kms(&i915->drm,
6085 "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
6086 plane->base.base.id, plane->base.name);
6087 return -EINVAL;
6088 }
6089
6090 if (old_plane_state->hw.fb->format !=
6091 new_plane_state->hw.fb->format) {
6092 drm_dbg_kms(&i915->drm,
6093 "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
6094 plane->base.base.id, plane->base.name);
6095 return -EINVAL;
6096 }
6097
6098 if (old_plane_state->hw.rotation !=
6099 new_plane_state->hw.rotation) {
6100 drm_dbg_kms(&i915->drm,
6101 "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
6102 plane->base.base.id, plane->base.name);
6103 return -EINVAL;
6104 }
6105
6106 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
6107 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
6108 drm_dbg_kms(&i915->drm,
6109 "[PLANE:%d:%s] Size/coordinates cannot be changed in async flip\n",
6110 plane->base.base.id, plane->base.name);
6111 return -EINVAL;
6112 }
6113
6114 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
6115 drm_dbg_kms(&i915->drm,
6116 "[PLANE:%d:%s] Alpha value cannot be changed in async flip\n",
6117 plane->base.base.id, plane->base.name);
6118 
return -EINVAL;
6119 }
6120
6121 if (old_plane_state->hw.pixel_blend_mode !=
6122 new_plane_state->hw.pixel_blend_mode) {
6123 drm_dbg_kms(&i915->drm,
6124 "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
6125 plane->base.base.id, plane->base.name);
6126 return -EINVAL;
6127 }
6128
6129 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
6130 drm_dbg_kms(&i915->drm,
6131 "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
6132 plane->base.base.id, plane->base.name);
6133 return -EINVAL;
6134 }
6135
6136 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
6137 drm_dbg_kms(&i915->drm,
6138 "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
6139 plane->base.base.id, plane->base.name);
6140 return -EINVAL;
6141 }
6142
6143 /* plane decryption is allowed to change only in synchronous flips */
6144 if (old_plane_state->decrypt != new_plane_state->decrypt) {
6145 drm_dbg_kms(&i915->drm,
6146 "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
6147 plane->base.base.id, plane->base.name);
6148 return -EINVAL;
6149 }
6150 }
6151
6152 return 0;
6153 }
6154
6155 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
6156 {
6157 struct drm_i915_private *i915 = to_i915(state->base.dev);
6158 struct intel_crtc_state *crtc_state;
6159 struct intel_crtc *crtc;
6160 u8 affected_pipes = 0;
6161 u8 modeset_pipes = 0;
6162 int i;
6163
6164 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6165 affected_pipes |= crtc_state->bigjoiner_pipes;
6166 if (intel_crtc_needs_modeset(crtc_state))
6167 modeset_pipes |= crtc_state->bigjoiner_pipes;
6168 }
6169
6170 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
6171 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6172 if (IS_ERR(crtc_state))
6173 return PTR_ERR(crtc_state);
6174 }
6175
6176 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
6177 int ret;
6178
6179 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6180
6181 crtc_state->uapi.mode_changed = true;
6182
6183 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
6184 if (ret)
6185 return ret;
6186
6187 ret = intel_atomic_add_affected_planes(state, crtc);
6188 if (ret)
6189 return ret;
6190 }
6191
6192 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6193 /* Kill the old bigjoiner link, we may re-establish it afterwards */
6194 if (intel_crtc_needs_modeset(crtc_state) &&
6195 intel_crtc_is_bigjoiner_master(crtc_state))
6196 kill_bigjoiner_slave(state, crtc);
6197 }
6198
6199 return 0;
6200 }
6201
6202 static int intel_atomic_check_config(struct intel_atomic_state *state,
6203 struct intel_link_bw_limits *limits,
6204 enum pipe *failed_pipe)
6205 {
6206 struct drm_i915_private *i915 = to_i915(state->base.dev);
6207 struct intel_crtc_state *new_crtc_state;
6208 struct intel_crtc *crtc;
6209 int ret;
6210 int i;
6211
6212 *failed_pipe = INVALID_PIPE;
6213
6214 ret = intel_bigjoiner_add_affected_crtcs(state);
6215 if (ret)
6216 return ret;
6217
6218 ret = intel_fdi_add_affected_crtcs(state);
6219 if (ret)
6220 return ret;
6221
6222 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6223 if (!intel_crtc_needs_modeset(new_crtc_state)) {
6224 if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
6225 copy_bigjoiner_crtc_state_nomodeset(state, crtc);
6226 else
6227 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
6228 continue;
6229 }
6230
6231 if
(intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
6232 drm_WARN_ON(&i915->drm, new_crtc_state->uapi.enable);
6233 continue;
6234 }
6235
6236 ret = intel_crtc_prepare_cleared_state(state, crtc);
6237 if (ret)
6238 break;
6239
6240 if (!new_crtc_state->hw.enable)
6241 continue;
6242
6243 ret = intel_modeset_pipe_config(state, crtc, limits);
6244 if (ret)
6245 break;
6246
6247 ret = intel_atomic_check_bigjoiner(state, crtc);
6248 if (ret)
6249 break;
6250 }
6251
6252 if (ret)
6253 *failed_pipe = crtc->pipe;
6254
6255 return ret;
6256 }
6257
6258 static int intel_atomic_check_config_and_link(struct intel_atomic_state *state)
6259 {
6260 struct intel_link_bw_limits new_limits;
6261 struct intel_link_bw_limits old_limits;
6262 int ret;
6263
6264 intel_link_bw_init_limits(state, &new_limits);
6265 old_limits = new_limits;
6266
6267 while (true) {
6268 enum pipe failed_pipe;
6269
6270 ret = intel_atomic_check_config(state, &new_limits,
6271 &failed_pipe);
6272 if (ret) {
6273 /*
6274 * The bpp limit for a pipe is below the minimum it supports; set the
6275 * limit to the minimum and recalculate the config.
6276 */
6277 if (ret == -EINVAL &&
6278 intel_link_bw_set_bpp_limit_for_pipe(state,
6279 &old_limits,
6280 &new_limits,
6281 failed_pipe))
6282 continue;
6283
6284 break;
6285 }
6286
6287 old_limits = new_limits;
6288
6289 ret = intel_link_bw_atomic_check(state, &new_limits);
6290 if (ret != -EAGAIN)
6291 break;
6292 }
6293
6294 return ret;
6295 }
6296 /**
6297 * intel_atomic_check - validate state object
6298 * @dev: drm device
6299 * @_state: state to validate
6300 */
6301 int intel_atomic_check(struct drm_device *dev,
6302 struct drm_atomic_state *_state)
6303 {
6304 struct drm_i915_private *dev_priv = to_i915(dev);
6305 struct intel_atomic_state *state = to_intel_atomic_state(_state);
6306 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6307 struct intel_crtc *crtc;
6308 int ret, i;
6309 bool any_ms = false;
6310
6311 if (!intel_display_driver_check_access(dev_priv))
6312 return -ENODEV;
6313
6314 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6315 new_crtc_state, i) {
6316 /*
6317 * The crtc's state is no longer considered to be inherited
6318 * after the first userspace/client initiated commit.
6319 */
6320 if (!state->internal)
6321 new_crtc_state->inherited = false;
6322
6323 if (new_crtc_state->inherited != old_crtc_state->inherited)
6324 new_crtc_state->uapi.mode_changed = true;
6325
6326 if (new_crtc_state->uapi.scaling_filter !=
6327 old_crtc_state->uapi.scaling_filter)
6328 new_crtc_state->uapi.mode_changed = true;
6329 }
6330
6331 intel_vrr_check_modeset(state);
6332
6333 ret = drm_atomic_helper_check_modeset(dev, &state->base);
6334 if (ret)
6335 goto fail;
6336
6337 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6338 ret = intel_async_flip_check_uapi(state, crtc);
6339 if (ret)
6340 return ret;
6341 }
6342
6343 ret = intel_atomic_check_config_and_link(state);
6344 if (ret)
6345 goto fail;
6346
6347 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6348 new_crtc_state, i) {
6349 if (!intel_crtc_needs_modeset(new_crtc_state))
6350 continue;
6351
6352 if (new_crtc_state->hw.enable) {
6353 ret = intel_modeset_pipe_config_late(state, crtc);
6354 if (ret)
6355 goto fail;
6356 }
6357
6358 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
6359 }
6360
6361 /*
6362 * Check if fastset is allowed by external dependencies like other
6363 * pipes and transcoders. 
6364 *
6365 * Right now it only forces a full modeset in two cases: when the MST
6366 * master transcoder itself did not change but the pipe of the master
6367 * transcoder needs a full modeset, in which case all slaves also need
6368 * to do a full modeset; and, in the case of port synced crtcs, when
6369 * one of the synced crtcs needs a full modeset, in which case all the
6370 * other synced crtcs should be forced to do a full modeset as well.
6371 */
6372 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6373 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
6374 continue;
6375
6376 if (intel_dp_mst_crtc_needs_modeset(state, crtc))
6377 intel_crtc_flag_modeset(new_crtc_state);
6378
6379 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
6380 enum transcoder master = new_crtc_state->mst_master_transcoder;
6381
6382 if (intel_cpu_transcoders_need_modeset(state, BIT(master)))
6383 intel_crtc_flag_modeset(new_crtc_state);
6384 }
6385
6386 if (is_trans_port_sync_mode(new_crtc_state)) {
6387 u8 trans = new_crtc_state->sync_mode_slaves_mask;
6388
6389 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
6390 trans |= BIT(new_crtc_state->master_transcoder);
6391
6392 if (intel_cpu_transcoders_need_modeset(state, trans))
6393 intel_crtc_flag_modeset(new_crtc_state);
6394 }
6395
6396 if (new_crtc_state->bigjoiner_pipes) {
6397 if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes))
6398 intel_crtc_flag_modeset(new_crtc_state);
6399 }
6400 }
6401
6402 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6403 new_crtc_state, i) {
6404 if (!intel_crtc_needs_modeset(new_crtc_state))
6405 continue;
6406
6407 any_ms = true;
6408
6409 intel_release_shared_dplls(state, crtc);
6410 }
6411
6412 if (any_ms && !check_digital_port_conflicts(state)) {
6413 drm_dbg_kms(&dev_priv->drm,
6414 "rejecting conflicting digital port configuration\n");
6415 ret = -EINVAL;
6416 goto fail;
6417 }
6418
6419 ret = intel_atomic_check_planes(state);
6420 if (ret)
6421 goto fail;
6422
6423 ret = intel_compute_global_watermarks(state);
6424 if (ret)
6425 goto fail;
6426
6427 ret = intel_bw_atomic_check(state);
6428 if (ret)
6429 goto fail;
6430
6431 ret = intel_cdclk_atomic_check(state, &any_ms);
6432 if (ret)
6433 goto fail;
6434
6435 if (intel_any_crtc_needs_modeset(state))
6436 any_ms = true;
6437
6438 if (any_ms) {
6439 ret = intel_modeset_checks(state);
6440 if (ret)
6441 goto fail;
6442
6443 ret = intel_modeset_calc_cdclk(state);
6444 if (ret)
6445 return ret;
6446 }
6447
6448 ret = intel_pmdemand_atomic_check(state);
6449 if (ret)
6450 goto fail;
6451
6452 ret = intel_atomic_check_crtcs(state);
6453 if (ret)
6454 goto fail;
6455
6456 ret = intel_fbc_atomic_check(state);
6457 if (ret)
6458 goto fail;
6459
6460 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6461 new_crtc_state, i) {
6462 intel_color_assert_luts(new_crtc_state);
6463
6464 ret = intel_async_flip_check_hw(state, crtc);
6465 if (ret)
6466 goto fail;
6467
6468 /* Either full modeset or fastset (or neither), never both */
6469 drm_WARN_ON(&dev_priv->drm,
6470 intel_crtc_needs_modeset(new_crtc_state) &&
6471 intel_crtc_needs_fastset(new_crtc_state));
6472
6473 if (!intel_crtc_needs_modeset(new_crtc_state) &&
6474 !intel_crtc_needs_fastset(new_crtc_state))
6475 continue;
6476
6477 intel_crtc_state_dump(new_crtc_state, state,
6478 intel_crtc_needs_modeset(new_crtc_state) ? 
6479 "modeset" : "fastset");
6480 }
6481
6482 return 0;
6483
6484 fail:
6485 if (ret == -EDEADLK)
6486 return ret;
6487
6488 /*
6489 * FIXME would probably be nice to know which crtc specifically
6490 * caused the failure, in cases where we can pinpoint it.
6491 */
6492 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6493 new_crtc_state, i)
6494 intel_crtc_state_dump(new_crtc_state, state, "failed");
6495
6496 return ret;
6497 }
6498
6499 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
6500 {
6501 struct intel_crtc_state *crtc_state;
6502 struct intel_crtc *crtc;
6503 int i, ret;
6504
6505 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
6506 if (ret < 0)
6507 return ret;
6508
6509 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6510 if (intel_crtc_needs_color_update(crtc_state))
6511 intel_color_prepare_commit(crtc_state);
6512 }
6513
6514 return 0;
6515 }
6516
6517 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
6518 struct intel_crtc_state *crtc_state)
6519 {
6520 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6521
6522 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
6523 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
6524
6525 if (crtc_state->has_pch_encoder) {
6526 enum pipe pch_transcoder =
6527 intel_crtc_pch_transcoder(crtc);
6528
6529 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
6530 }
6531 }
6532
6533 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
6534 const struct intel_crtc_state *new_crtc_state)
6535 {
6536 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6537 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6538
6539 /*
6540 * Update pipe size and adjust fitter if needed: the reason for this is
6541 * that in compute_mode_changes we check the native mode (not the pfit
6542 * mode) to see if we can flip rather than do a full mode set. In the
6543 * fastboot case, we'll flip, but if we don't update the pipesrc and
6544 * pfit state, we'll end up with a big fb scanned out into the wrong
6545 * sized surface.
6546 */
6547 intel_set_pipe_src_size(new_crtc_state);
6548
6549 /* on skylake this is done by detaching scalers */
6550 if (DISPLAY_VER(dev_priv) >= 9) {
6551 if (new_crtc_state->pch_pfit.enabled)
6552 skl_pfit_enable(new_crtc_state);
6553 } else if (HAS_PCH_SPLIT(dev_priv)) {
6554 if (new_crtc_state->pch_pfit.enabled)
6555 ilk_pfit_enable(new_crtc_state);
6556 else if (old_crtc_state->pch_pfit.enabled)
6557 ilk_pfit_disable(old_crtc_state);
6558 }
6559
6560 /*
6561 * The register is supposedly single buffered so perhaps
6562 * not 100% correct to do this here. But SKL+ calculate
6563 * this based on the adjusted pixel rate, so pfit changes do
6564 * affect it and so it must be updated for fastsets.
6565 * HSW/BDW only really need this here for fastboot, after
6566 * that the value should not change without a full modeset. 
6567 */ 6568 if (DISPLAY_VER(dev_priv) >= 9 || 6569 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 6570 hsw_set_linetime_wm(new_crtc_state); 6571 6572 if (new_crtc_state->update_m_n) 6573 intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder, 6574 &new_crtc_state->dp_m_n); 6575 6576 if (new_crtc_state->update_lrr) 6577 intel_set_transcoder_timings_lrr(new_crtc_state); 6578 } 6579 6580 static void commit_pipe_pre_planes(struct intel_atomic_state *state, 6581 struct intel_crtc *crtc) 6582 { 6583 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6584 const struct intel_crtc_state *old_crtc_state = 6585 intel_atomic_get_old_crtc_state(state, crtc); 6586 const struct intel_crtc_state *new_crtc_state = 6587 intel_atomic_get_new_crtc_state(state, crtc); 6588 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 6589 6590 /* 6591 * During modesets pipe configuration was programmed as the 6592 * CRTC was enabled. 6593 */ 6594 if (!modeset) { 6595 if (intel_crtc_needs_color_update(new_crtc_state)) 6596 intel_color_commit_arm(new_crtc_state); 6597 6598 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 6599 bdw_set_pipe_misc(new_crtc_state); 6600 6601 if (intel_crtc_needs_fastset(new_crtc_state)) 6602 intel_pipe_fastset(old_crtc_state, new_crtc_state); 6603 } 6604 6605 intel_psr2_program_trans_man_trk_ctl(new_crtc_state); 6606 6607 intel_atomic_update_watermarks(state, crtc); 6608 } 6609 6610 static void commit_pipe_post_planes(struct intel_atomic_state *state, 6611 struct intel_crtc *crtc) 6612 { 6613 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6614 const struct intel_crtc_state *old_crtc_state = 6615 intel_atomic_get_old_crtc_state(state, crtc); 6616 const struct intel_crtc_state *new_crtc_state = 6617 intel_atomic_get_new_crtc_state(state, crtc); 6618 6619 /* 6620 * Disable the scaler(s) after the plane(s) so that we don't 6621 * get a catastrophic underrun even if the two operations 6622 * end up happening in two different frames. 6623 */ 6624 if (DISPLAY_VER(dev_priv) >= 9 && 6625 !intel_crtc_needs_modeset(new_crtc_state)) 6626 skl_detach_scalers(new_crtc_state); 6627 6628 if (vrr_enabling(old_crtc_state, new_crtc_state)) 6629 intel_vrr_enable(new_crtc_state); 6630 } 6631 6632 static void intel_enable_crtc(struct intel_atomic_state *state, 6633 struct intel_crtc *crtc) 6634 { 6635 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6636 const struct intel_crtc_state *new_crtc_state = 6637 intel_atomic_get_new_crtc_state(state, crtc); 6638 6639 if (!intel_crtc_needs_modeset(new_crtc_state)) 6640 return; 6641 6642 /* VRR will be enable later, if required */ 6643 intel_crtc_update_active_timings(new_crtc_state, false); 6644 6645 dev_priv->display.funcs.display->crtc_enable(state, crtc); 6646 6647 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) 6648 return; 6649 6650 /* vblanks work again, re-enable pipe CRC. 
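(CRC capture was stopped in intel_old_crtc_state_disables() before the pipe went down, to avoid racing against vblank off) 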
*/ 6651 intel_crtc_enable_pipe_crc(crtc); 6652 } 6653 6654 static void intel_pre_update_crtc(struct intel_atomic_state *state, 6655 struct intel_crtc *crtc) 6656 { 6657 struct drm_i915_private *i915 = to_i915(state->base.dev); 6658 const struct intel_crtc_state *old_crtc_state = 6659 intel_atomic_get_old_crtc_state(state, crtc); 6660 struct intel_crtc_state *new_crtc_state = 6661 intel_atomic_get_new_crtc_state(state, crtc); 6662 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 6663 6664 if (old_crtc_state->inherited || 6665 intel_crtc_needs_modeset(new_crtc_state)) { 6666 if (HAS_DPT(i915)) 6667 intel_dpt_configure(crtc); 6668 } 6669 6670 if (!modeset) { 6671 if (new_crtc_state->preload_luts && 6672 intel_crtc_needs_color_update(new_crtc_state)) 6673 intel_color_load_luts(new_crtc_state); 6674 6675 intel_pre_plane_update(state, crtc); 6676 6677 if (intel_crtc_needs_fastset(new_crtc_state)) 6678 intel_encoders_update_pipe(state, crtc); 6679 6680 if (DISPLAY_VER(i915) >= 11 && 6681 intel_crtc_needs_fastset(new_crtc_state)) 6682 icl_set_pipe_chicken(new_crtc_state); 6683 6684 if (vrr_params_changed(old_crtc_state, new_crtc_state)) 6685 intel_vrr_set_transcoder_timings(new_crtc_state); 6686 } 6687 6688 intel_fbc_update(state, crtc); 6689 6690 drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF)); 6691 6692 if (!modeset && 6693 intel_crtc_needs_color_update(new_crtc_state)) 6694 intel_color_commit_noarm(new_crtc_state); 6695 6696 intel_crtc_planes_update_noarm(state, crtc); 6697 } 6698 6699 static void intel_update_crtc(struct intel_atomic_state *state, 6700 struct intel_crtc *crtc) 6701 { 6702 const struct intel_crtc_state *old_crtc_state = 6703 intel_atomic_get_old_crtc_state(state, crtc); 6704 struct intel_crtc_state *new_crtc_state = 6705 intel_atomic_get_new_crtc_state(state, crtc); 6706 6707 /* Perform vblank evasion around commit operation */ 6708 intel_pipe_update_start(state, crtc); 6709 6710 commit_pipe_pre_planes(state, crtc); 6711 6712 intel_crtc_planes_update_arm(state, crtc); 6713 6714 commit_pipe_post_planes(state, crtc); 6715 6716 intel_pipe_update_end(state, crtc); 6717 6718 /* 6719 * VRR/Seamless M/N update may need to update frame timings. 6720 * 6721 * FIXME Should be synchronized with the start of vblank somehow... 6722 */ 6723 if (vrr_enabling(old_crtc_state, new_crtc_state) || 6724 new_crtc_state->update_m_n || new_crtc_state->update_lrr) 6725 intel_crtc_update_active_timings(new_crtc_state, 6726 new_crtc_state->vrr.enable); 6727 6728 /* 6729 * We usually enable FIFO underrun interrupts as part of the 6730 * CRTC enable sequence during modesets. But when we inherit a 6731 * valid pipe configuration from the BIOS we need to take care 6732 * of enabling them on the CRTC's first fastset. 6733 */ 6734 if (intel_crtc_needs_fastset(new_crtc_state) && 6735 old_crtc_state->inherited) 6736 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 6737 } 6738 6739 static void intel_old_crtc_state_disables(struct intel_atomic_state *state, 6740 struct intel_crtc_state *old_crtc_state, 6741 struct intel_crtc_state *new_crtc_state, 6742 struct intel_crtc *crtc) 6743 { 6744 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6745 6746 /* 6747 * We need to disable pipe CRC before disabling the pipe, 6748 * or we race against vblank off. 
6749 */
6750 intel_crtc_disable_pipe_crc(crtc);
6751
6752 dev_priv->display.funcs.display->crtc_disable(state, crtc);
6753 crtc->active = false;
6754 intel_fbc_disable(crtc);
6755
6756 if (!new_crtc_state->hw.active)
6757 intel_initial_watermarks(state, crtc);
6758 }
6759
6760 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
6761 {
6762 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
6763 struct intel_crtc *crtc;
6764 u32 handled = 0;
6765 int i;
6766
6767 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6768 new_crtc_state, i) {
6769 if (!intel_crtc_needs_modeset(new_crtc_state))
6770 continue;
6771
6772 intel_pre_plane_update(state, crtc);
6773
6774 if (!old_crtc_state->hw.active)
6775 continue;
6776
6777 intel_crtc_disable_planes(state, crtc);
6778 }
6779
6780 /* Only disable port sync and MST slaves */
6781 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6782 new_crtc_state, i) {
6783 if (!intel_crtc_needs_modeset(new_crtc_state))
6784 continue;
6785
6786 if (!old_crtc_state->hw.active)
6787 continue;
6788
6789 /* In case of Transcoder port Sync, master/slave CRTCs can be
6790 * assigned in any order, and we need to make sure that
6791 * slave CRTCs are disabled first and the master CRTC after,
6792 * since slave vblanks are masked till the master's vblanks.
6793 */
6794 if (!is_trans_port_sync_slave(old_crtc_state) &&
6795 !intel_dp_mst_is_slave_trans(old_crtc_state) &&
6796 !intel_crtc_is_bigjoiner_slave(old_crtc_state))
6797 continue;
6798
6799 intel_old_crtc_state_disables(state, old_crtc_state,
6800 new_crtc_state, crtc);
6801 handled |= BIT(crtc->pipe);
6802 }
6803
6804 /* Disable everything else left on */
6805 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6806 new_crtc_state, i) {
6807 if (!intel_crtc_needs_modeset(new_crtc_state) ||
6808 (handled & BIT(crtc->pipe)))
6809 continue;
6810
6811 if (!old_crtc_state->hw.active)
6812 continue;
6813
6814 intel_old_crtc_state_disables(state, old_crtc_state,
6815 new_crtc_state, crtc);
6816 }
6817 }
6818
6819 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
6820 {
6821 struct intel_crtc_state *new_crtc_state;
6822 struct intel_crtc *crtc;
6823 int i;
6824
6825 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6826 if (!new_crtc_state->hw.active)
6827 continue;
6828
6829 intel_enable_crtc(state, crtc);
6830 intel_pre_update_crtc(state, crtc);
6831 }
6832
6833 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6834 if (!new_crtc_state->hw.active)
6835 continue;
6836
6837 intel_update_crtc(state, crtc);
6838 }
6839 }
6840
6841 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
6842 {
6843 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6844 struct intel_crtc *crtc;
6845 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6846 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
6847 u8 update_pipes = 0, modeset_pipes = 0;
6848 int i;
6849
6850 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6851 enum pipe pipe = crtc->pipe;
6852
6853 if (!new_crtc_state->hw.active)
6854 continue;
6855
6856 /* ignore allocations for crtc's that have been turned off. 
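Pipes doing only a plane update keep their current allocation, which is recorded in entries[] for the overlap checks below; pipes doing a full modeset get a fresh allocation instead. 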
*/
6857 if (!intel_crtc_needs_modeset(new_crtc_state)) {
6858 entries[pipe] = old_crtc_state->wm.skl.ddb;
6859 update_pipes |= BIT(pipe);
6860 } else {
6861 modeset_pipes |= BIT(pipe);
6862 }
6863 }
6864
6865 /*
6866 * Whenever the number of active pipes changes, we need to make sure we
6867 * update the pipes in the right order so that their ddb allocations
6868 * never overlap with each other between CRTC updates. Otherwise we'll
6869 * cause pipe underruns and other bad stuff.
6870 *
6871 * So first let's enable all pipes that do not need a full modeset as
6872 * those don't have any external dependency.
6873 */
6874 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6875 enum pipe pipe = crtc->pipe;
6876
6877 if ((update_pipes & BIT(pipe)) == 0)
6878 continue;
6879
6880 intel_pre_update_crtc(state, crtc);
6881 }
6882
6883 while (update_pipes) {
6884 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6885 new_crtc_state, i) {
6886 enum pipe pipe = crtc->pipe;
6887
6888 if ((update_pipes & BIT(pipe)) == 0)
6889 continue;
6890
6891 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
6892 entries, I915_MAX_PIPES, pipe))
6893 continue;
6894
6895 entries[pipe] = new_crtc_state->wm.skl.ddb;
6896 update_pipes &= ~BIT(pipe);
6897
6898 intel_update_crtc(state, crtc);
6899
6900 /*
6901 * If this is an already active pipe, its DDB changed,
6902 * and this isn't the last pipe that needs updating,
6903 * then we need to wait for a vblank to pass for the
6904 * new ddb allocation to take effect.
6905 */
6906 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
6907 &old_crtc_state->wm.skl.ddb) &&
6908 (update_pipes | modeset_pipes))
6909 intel_crtc_wait_for_next_vblank(crtc);
6910 }
6911 }
6912
6913 update_pipes = modeset_pipes;
6914
6915 /*
6916 * Enable all pipes that need a modeset and do not depend on other
6917 * pipes
6918 */
6919 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6920 enum pipe pipe = crtc->pipe;
6921
6922 if ((modeset_pipes & BIT(pipe)) == 0)
6923 continue;
6924
6925 if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
6926 is_trans_port_sync_master(new_crtc_state) ||
6927 intel_crtc_is_bigjoiner_master(new_crtc_state))
6928 continue;
6929
6930 modeset_pipes &= ~BIT(pipe);
6931
6932 intel_enable_crtc(state, crtc);
6933 }
6934
6935 /*
6936 * Then we enable all remaining pipes that depend on other
6937 * pipes: MST slaves and port sync masters, big joiner masters
6938 */
6939 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6940 enum pipe pipe = crtc->pipe;
6941
6942 if ((modeset_pipes & BIT(pipe)) == 0)
6943 continue;
6944
6945 modeset_pipes &= ~BIT(pipe);
6946
6947 intel_enable_crtc(state, crtc);
6948 }
6949
6950 /*
6951 * Finally we do the plane updates/etc. for all pipes that got enabled. 
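 * (update_pipes was reassigned from modeset_pipes above, so at this
 * point it holds exactly the set of freshly enabled pipes)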
6952 */ 6953 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6954 enum pipe pipe = crtc->pipe; 6955 6956 if ((update_pipes & BIT(pipe)) == 0) 6957 continue; 6958 6959 intel_pre_update_crtc(state, crtc); 6960 } 6961 6962 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6963 enum pipe pipe = crtc->pipe; 6964 6965 if ((update_pipes & BIT(pipe)) == 0) 6966 continue; 6967 6968 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 6969 entries, I915_MAX_PIPES, pipe)); 6970 6971 entries[pipe] = new_crtc_state->wm.skl.ddb; 6972 update_pipes &= ~BIT(pipe); 6973 6974 intel_update_crtc(state, crtc); 6975 } 6976 6977 drm_WARN_ON(&dev_priv->drm, modeset_pipes); 6978 drm_WARN_ON(&dev_priv->drm, update_pipes); 6979 } 6980 6981 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 6982 { 6983 struct drm_i915_private *i915 = to_i915(intel_state->base.dev); 6984 struct drm_plane *plane; 6985 struct drm_plane_state *new_plane_state; 6986 int ret, i; 6987 6988 for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) { 6989 if (new_plane_state->fence) { 6990 ret = dma_fence_wait_timeout(new_plane_state->fence, false, 6991 i915_fence_timeout(i915)); 6992 if (ret <= 0) 6993 break; 6994 6995 dma_fence_put(new_plane_state->fence); 6996 new_plane_state->fence = NULL; 6997 } 6998 } 6999 } 7000 7001 static void intel_atomic_cleanup_work(struct work_struct *work) 7002 { 7003 struct intel_atomic_state *state = 7004 container_of(work, struct intel_atomic_state, base.commit_work); 7005 struct drm_i915_private *i915 = to_i915(state->base.dev); 7006 struct intel_crtc_state *old_crtc_state; 7007 struct intel_crtc *crtc; 7008 int i; 7009 7010 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) 7011 intel_color_cleanup_commit(old_crtc_state); 7012 7013 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base); 7014 drm_atomic_helper_commit_cleanup_done(&state->base); 7015 drm_atomic_state_put(&state->base); 7016 } 7017 7018 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state) 7019 { 7020 struct drm_i915_private *i915 = to_i915(state->base.dev); 7021 struct intel_plane *plane; 7022 struct intel_plane_state *plane_state; 7023 int i; 7024 7025 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 7026 struct drm_framebuffer *fb = plane_state->hw.fb; 7027 int cc_plane; 7028 int ret; 7029 7030 if (!fb) 7031 continue; 7032 7033 cc_plane = intel_fb_rc_ccs_cc_plane(fb); 7034 if (cc_plane < 0) 7035 continue; 7036 7037 /* 7038 * The layout of the fast clear color value expected by HW 7039 * (the DRM ABI requiring this value to be located in fb at 7040 * offset 0 of cc plane, plane #2 previous generations or 7041 * plane #1 for flat ccs): 7042 * - 4 x 4 bytes per-channel value 7043 * (in surface type specific float/int format provided by the fb user) 7044 * - 8 bytes native color value used by the display 7045 * (converted/written by GPU during a fast clear operation using the 7046 * above per-channel values) 7047 * 7048 * The commit's FB prepare hook already ensured that FB obj is pinned and the 7049 * caller made sure that the object is synced wrt. the related color clear value 7050 * GPU write on it. 7051 */ 7052 ret = i915_gem_object_read_from_page(intel_fb_obj(fb), 7053 fb->offsets[cc_plane] + 16, 7054 &plane_state->ccval, 7055 sizeof(plane_state->ccval)); 7056 /* The above could only fail if the FB obj has an unexpected backing store type. 
*/
7057 drm_WARN_ON(&i915->drm, ret);
7058 }
7059 }
7060
7061 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
7062 {
7063 struct drm_device *dev = state->base.dev;
7064 struct drm_i915_private *dev_priv = to_i915(dev);
7065 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
7066 struct intel_crtc *crtc;
7067 struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
7068 intel_wakeref_t wakeref = 0;
7069 int i;
7070
7071 intel_atomic_commit_fence_wait(state);
7072
7073 drm_atomic_helper_wait_for_dependencies(&state->base);
7074 drm_dp_mst_atomic_wait_for_dependencies(&state->base);
7075 intel_atomic_global_state_wait_for_dependencies(state);
7076
7077 /*
7078 * During full modesets we write a lot of registers, wait
7079 * for PLLs, etc. Doing that while DC states are enabled
7080 * is not a good idea.
7081 *
7082 * During fastsets and other updates we also need to
7083 * disable DC states due to the following scenario:
7084 * 1. DC5 exit and PSR exit happen
7085 * 2. Some or all _noarm() registers are written
7086 * 3. Due to some long delay PSR is re-entered
7087 * 4. DC5 entry -> DMC saves the already written new
7088 * _noarm() registers and the old not yet written
7089 * _arm() registers
7090 * 5. DC5 exit -> DMC restores a mixture of old and
7091 * new register values and arms the update
7092 * 6. PSR exit -> hardware latches a mixture of old and
7093 * new register values -> corrupted frame, or worse
7094 * 7. New _arm() registers are finally written
7095 * 8. Hardware finally latches a complete set of new
7096 * register values, and subsequent frames will be OK again
7097 *
7098 * Also note that due to the pipe CSC hardware issues on
7099 * SKL/GLK DC states must remain off until the pipe CSC
7100 * state readout has happened. Otherwise we risk corrupting
7101 * the CSC latched register values with the readout (see
7102 * skl_read_csc() and skl_color_commit_noarm()).
7103 */
7104 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
7105
7106 intel_atomic_prepare_plane_clear_colors(state);
7107
7108 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7109 new_crtc_state, i) {
7110 if (intel_crtc_needs_modeset(new_crtc_state) ||
7111 intel_crtc_needs_fastset(new_crtc_state))
7112 intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
7113 }
7114
7115 intel_commit_modeset_disables(state);
7116
7117 intel_dp_tunnel_atomic_alloc_bw(state);
7118
7119 /* FIXME: Eventually get rid of our crtc->config pointer */
7120 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7121 crtc->config = new_crtc_state;
7122
7123 /*
7124 * In XE_LPD+ Pmdemand combines many parameters such as voltage index,
7125 * plls, cdclk frequency, QGV point selection parameter etc. The voltage
7126 * index and cdclk/ddiclk frequencies are supposed to be configured
7127 * before the cdclk config is set.
7128 */
7129 intel_pmdemand_pre_plane_update(state);
7130
7131 if (state->modeset) {
7132 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
7133
7134 intel_set_cdclk_pre_plane_update(state);
7135
7136 intel_modeset_verify_disabled(state);
7137 }
7138
7139 intel_sagv_pre_plane_update(state);
7140
7141 /* Complete the events for pipes that have now been disabled */
7142 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7143 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
7144
7145 /* Complete events for now-disabled pipes here. 
*/ 7146 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) { 7147 spin_lock_irq(&dev->event_lock); 7148 drm_crtc_send_vblank_event(&crtc->base, 7149 new_crtc_state->uapi.event); 7150 spin_unlock_irq(&dev->event_lock); 7151 7152 new_crtc_state->uapi.event = NULL; 7153 } 7154 } 7155 7156 intel_encoders_update_prepare(state); 7157 7158 intel_dbuf_pre_plane_update(state); 7159 intel_mbus_dbox_update(state); 7160 7161 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7162 if (new_crtc_state->do_async_flip) 7163 intel_crtc_enable_flip_done(state, crtc); 7164 } 7165 7166 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 7167 dev_priv->display.funcs.display->commit_modeset_enables(state); 7168 7169 if (state->modeset) 7170 intel_set_cdclk_post_plane_update(state); 7171 7172 intel_wait_for_vblank_workers(state); 7173 7174 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here 7175 * already, but still need the state for the delayed optimization. To 7176 * fix this: 7177 * - wrap the optimization/post_plane_update stuff into a per-crtc work. 7178 * - schedule that vblank worker _before_ calling hw_done 7179 * - at the start of commit_tail, cancel it _synchronously 7180 * - switch over to the vblank wait helper in the core after that since 7181 * we don't need our special handling any more. 7182 */ 7183 drm_atomic_helper_wait_for_flip_done(dev, &state->base); 7184 7185 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7186 if (new_crtc_state->do_async_flip) 7187 intel_crtc_disable_flip_done(state, crtc); 7188 7189 intel_color_wait_commit(new_crtc_state); 7190 } 7191 7192 /* 7193 * Now that the vblank has passed, we can go ahead and program the 7194 * optimal watermarks on platforms that need two-step watermark 7195 * programming. 7196 * 7197 * TODO: Move this (and other cleanup) to an async worker eventually. 7198 */ 7199 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7200 new_crtc_state, i) { 7201 /* 7202 * Gen2 reports pipe underruns whenever all planes are disabled. 7203 * So re-enable underrun reporting after some planes get enabled. 7204 * 7205 * We do this before .optimize_watermarks() so that we have a 7206 * chance of catching underruns with the intermediate watermarks 7207 * vs. the new plane configuration. 7208 */ 7209 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state)) 7210 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 7211 7212 intel_optimize_watermarks(state, crtc); 7213 } 7214 7215 intel_dbuf_post_plane_update(state); 7216 7217 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 7218 intel_post_plane_update(state, crtc); 7219 7220 intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]); 7221 7222 intel_modeset_verify_crtc(state, crtc); 7223 7224 /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */ 7225 hsw_ips_post_update(state, crtc); 7226 7227 /* 7228 * Activate DRRS after state readout to avoid 7229 * dp_m_n vs. dp_m2_n2 confusion on BDW+. 7230 */ 7231 intel_drrs_activate(new_crtc_state); 7232 7233 /* 7234 * DSB cleanup is done in cleanup_work, aligning with framebuffer 7235 * cleanup. So copy and reset the dsb structure to sync with 7236 * commit_done, and do the dsb cleanup later in cleanup_work.
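 * (fetch_and_zero() returns the current pointer and writes NULL back, so
 * ownership of the DSB moves wholesale to old_crtc_state, and
 * intel_atomic_cleanup_work() only needs to walk the old states.)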
7237 * 7238 * FIXME get rid of this funny new->old swapping 7239 */ 7240 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb); 7241 } 7242 7243 /* Underruns don't always raise interrupts, so check manually */ 7244 intel_check_cpu_fifo_underruns(dev_priv); 7245 intel_check_pch_fifo_underruns(dev_priv); 7246 7247 if (state->modeset) 7248 intel_verify_planes(state); 7249 7250 intel_sagv_post_plane_update(state); 7251 intel_pmdemand_post_plane_update(state); 7252 7253 drm_atomic_helper_commit_hw_done(&state->base); 7254 intel_atomic_global_state_commit_done(state); 7255 7256 if (state->modeset) { 7257 /* As one of the primary mmio accessors, KMS has a high 7258 * likelihood of triggering bugs in unclaimed access. After we 7259 * finish modesetting, see if an error has been flagged, and if 7260 * so enable debugging for the next modeset - and hope we catch 7261 * the culprit. 7262 */ 7263 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); 7264 } 7265 /* 7266 * Delay re-enabling DC states by 17 ms to avoid the off->on->off 7267 * toggling overhead at and above 60 FPS. 7268 */ 7269 intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17); 7270 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 7271 7272 /* 7273 * Defer the cleanup of the old state to a separate worker so as not to 7274 * impede the current task (userspace for blocking modesets) that 7275 * is executed inline. For out-of-line asynchronous modesets/flips, 7276 * deferring to a new worker seems overkill, but we would place a 7277 * schedule point (cond_resched()) here anyway to keep latencies 7278 * down. 7279 */ 7280 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); 7281 queue_work(system_highpri_wq, &state->base.commit_work); 7282 } 7283 7284 static void intel_atomic_commit_work(struct work_struct *work) 7285 { 7286 struct intel_atomic_state *state = 7287 container_of(work, struct intel_atomic_state, base.commit_work); 7288 7289 intel_atomic_commit_tail(state); 7290 } 7291 7292 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 7293 { 7294 struct intel_plane_state *old_plane_state, *new_plane_state; 7295 struct intel_plane *plane; 7296 int i; 7297 7298 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 7299 new_plane_state, i) 7300 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 7301 to_intel_frontbuffer(new_plane_state->hw.fb), 7302 plane->frontbuffer_bit); 7303 } 7304 7305 static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock) 7306 { 7307 int ret; 7308 7309 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 7310 if (ret) 7311 return ret; 7312 7313 ret = intel_atomic_global_state_setup_commit(state); 7314 if (ret) 7315 return ret; 7316 7317 return 0; 7318 } 7319 7320 static int intel_atomic_swap_state(struct intel_atomic_state *state) 7321 { 7322 int ret; 7323 7324 ret = drm_atomic_helper_swap_state(&state->base, true); 7325 if (ret) 7326 return ret; 7327 7328 intel_atomic_swap_global_state(state); 7329 7330 intel_shared_dpll_swap_state(state); 7331 7332 intel_atomic_track_fbs(state); 7333 7334 return 0; 7335 } 7336 7337 int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, 7338 bool nonblock) 7339 { 7340 struct intel_atomic_state *state = to_intel_atomic_state(_state); 7341 struct drm_i915_private *dev_priv = to_i915(dev); 7342 int ret = 0; 7343 7344 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 7345 7346 /* 7347 * The
intel_legacy_cursor_update() fast path takes care 7348 * of avoiding the vblank waits for simple cursor 7349 * movement and flips. For cursor on/off and size changes, 7350 * we want to perform the vblank waits so that watermark 7351 * updates happen during the correct frames. Gen9+ have 7352 * double buffered watermarks and so shouldn't need this. 7353 * 7354 * Unset state->legacy_cursor_update before the call to 7355 * drm_atomic_helper_setup_commit() because otherwise 7356 * drm_atomic_helper_wait_for_flip_done() is a noop and 7357 * we get FIFO underruns because we didn't wait 7358 * for vblank. 7359 * 7360 * FIXME doing watermarks and fb cleanup from a vblank worker 7361 * (assuming we had any) would solve these problems. 7362 */ 7363 if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) { 7364 struct intel_crtc_state *new_crtc_state; 7365 struct intel_crtc *crtc; 7366 int i; 7367 7368 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7369 if (new_crtc_state->wm.need_postvbl_update || 7370 new_crtc_state->update_wm_post) 7371 state->base.legacy_cursor_update = false; 7372 } 7373 7374 ret = intel_atomic_prepare_commit(state); 7375 if (ret) { 7376 drm_dbg_atomic(&dev_priv->drm, 7377 "Preparing state failed with %i\n", ret); 7378 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 7379 return ret; 7380 } 7381 7382 ret = intel_atomic_setup_commit(state, nonblock); 7383 if (!ret) 7384 ret = intel_atomic_swap_state(state); 7385 7386 if (ret) { 7387 struct intel_crtc_state *new_crtc_state; 7388 struct intel_crtc *crtc; 7389 int i; 7390 7391 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7392 intel_color_cleanup_commit(new_crtc_state); 7393 7394 drm_atomic_helper_unprepare_planes(dev, &state->base); 7395 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 7396 return ret; 7397 } 7398 7399 drm_atomic_state_get(&state->base); 7400 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 7401 7402 if (nonblock && state->modeset) { 7403 queue_work(dev_priv->display.wq.modeset, &state->base.commit_work); 7404 } else if (nonblock) { 7405 queue_work(dev_priv->display.wq.flip, &state->base.commit_work); 7406 } else { 7407 if (state->modeset) 7408 flush_workqueue(dev_priv->display.wq.modeset); 7409 intel_atomic_commit_tail(state); 7410 } 7411 7412 return 0; 7413 } 7414 7415 /** 7416 * intel_plane_destroy - destroy a plane 7417 * @plane: plane to destroy 7418 * 7419 * Common destruction function for all types of planes (primary, cursor, 7420 * sprite). 
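 *
 * Typically installed as the &drm_plane_funcs.destroy hook; it undoes
 * the plane init and frees the wrapping intel_plane.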
7421 */ 7422 void intel_plane_destroy(struct drm_plane *plane) 7423 { 7424 drm_plane_cleanup(plane); 7425 kfree(to_intel_plane(plane)); 7426 } 7427 7428 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 7429 struct drm_file *file) 7430 { 7431 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 7432 struct drm_crtc *drmmode_crtc; 7433 struct intel_crtc *crtc; 7434 7435 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); 7436 if (!drmmode_crtc) 7437 return -ENOENT; 7438 7439 crtc = to_intel_crtc(drmmode_crtc); 7440 pipe_from_crtc_id->pipe = crtc->pipe; 7441 7442 return 0; 7443 } 7444 7445 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) 7446 { 7447 struct drm_device *dev = encoder->base.dev; 7448 struct intel_encoder *source_encoder; 7449 u32 possible_clones = 0; 7450 7451 for_each_intel_encoder(dev, source_encoder) { 7452 if (encoders_cloneable(encoder, source_encoder)) 7453 possible_clones |= drm_encoder_mask(&source_encoder->base); 7454 } 7455 7456 return possible_clones; 7457 } 7458 7459 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) 7460 { 7461 struct drm_device *dev = encoder->base.dev; 7462 struct intel_crtc *crtc; 7463 u32 possible_crtcs = 0; 7464 7465 for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask) 7466 possible_crtcs |= drm_crtc_mask(&crtc->base); 7467 7468 return possible_crtcs; 7469 } 7470 7471 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) 7472 { 7473 if (!IS_MOBILE(dev_priv)) 7474 return false; 7475 7476 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0) 7477 return false; 7478 7479 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE)) 7480 return false; 7481 7482 return true; 7483 } 7484 7485 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) 7486 { 7487 if (DISPLAY_VER(dev_priv) >= 9) 7488 return false; 7489 7490 if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv)) 7491 return false; 7492 7493 if (HAS_PCH_LPT_H(dev_priv) && 7494 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 7495 return false; 7496 7497 /* DDI E can't be used if DDI A requires 4 lanes */ 7498 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 7499 return false; 7500 7501 if (!dev_priv->display.vbt.int_crt_support) 7502 return false; 7503 7504 return true; 7505 } 7506 7507 bool assert_port_valid(struct drm_i915_private *i915, enum port port) 7508 { 7509 return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)), 7510 "Platform does not support port %c\n", port_name(port)); 7511 } 7512 7513 void intel_setup_outputs(struct drm_i915_private *dev_priv) 7514 { 7515 struct intel_encoder *encoder; 7516 bool dpd_is_edp = false; 7517 7518 intel_pps_unlock_regs_wa(dev_priv); 7519 7520 if (!HAS_DISPLAY(dev_priv)) 7521 return; 7522 7523 if (HAS_DDI(dev_priv)) { 7524 if (intel_ddi_crt_present(dev_priv)) 7525 intel_crt_init(dev_priv); 7526 7527 intel_bios_for_each_encoder(dev_priv, intel_ddi_init); 7528 7529 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 7530 vlv_dsi_init(dev_priv); 7531 } else if (HAS_PCH_SPLIT(dev_priv)) { 7532 int found; 7533 7534 /* 7535 * intel_edp_init_connector() depends on this completing first, 7536 * to prevent the registration of both eDP and LVDS and the 7537 * incorrect sharing of the PPS. 
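 * (Both encoders can describe the same panel; whichever registered second
 * would otherwise end up sharing the single panel power sequencer already
 * claimed by the first.)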
7538 */ 7539 intel_lvds_init(dev_priv); 7540 intel_crt_init(dev_priv); 7541 7542 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); 7543 7544 if (ilk_has_edp_a(dev_priv)) 7545 g4x_dp_init(dev_priv, DP_A, PORT_A); 7546 7547 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) { 7548 /* PCH SDVOB multiplex with HDMIB */ 7549 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 7550 if (!found) 7551 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 7552 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED)) 7553 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B); 7554 } 7555 7556 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED) 7557 g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 7558 7559 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED) 7560 g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 7561 7562 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED) 7563 g4x_dp_init(dev_priv, PCH_DP_C, PORT_C); 7564 7565 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED) 7566 g4x_dp_init(dev_priv, PCH_DP_D, PORT_D); 7567 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7568 bool has_edp, has_port; 7569 7570 if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support) 7571 intel_crt_init(dev_priv); 7572 7573 /* 7574 * The DP_DETECTED bit is the latched state of the DDC 7575 * SDA pin at boot. However since eDP doesn't require DDC 7576 * (no way to plug in a DP->HDMI dongle) the DDC pins for 7577 * eDP ports may have been muxed to an alternate function. 7578 * Thus we can't rely on the DP_DETECTED bit alone to detect 7579 * eDP ports. Consult the VBT as well as DP_DETECTED to 7580 * detect eDP ports. 7581 * 7582 * Sadly the straps seem to be missing sometimes even for HDMI 7583 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap 7584 * and VBT for the presence of the port. Additionally we can't 7585 * trust the port type the VBT declares as we've seen at least 7586 * HDMI ports that the VBT claim are DP or eDP. 
7587 */ 7588 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); 7589 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 7590 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port) 7591 has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B); 7592 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 7593 g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 7594 7595 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); 7596 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 7597 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port) 7598 has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C); 7599 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 7600 g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 7601 7602 if (IS_CHERRYVIEW(dev_priv)) { 7603 /* 7604 * eDP not supported on port D, 7605 * so no need to worry about it 7606 */ 7607 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 7608 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port) 7609 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D); 7610 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port) 7611 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 7612 } 7613 7614 vlv_dsi_init(dev_priv); 7615 } else if (IS_PINEVIEW(dev_priv)) { 7616 intel_lvds_init(dev_priv); 7617 intel_crt_init(dev_priv); 7618 } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) { 7619 bool found = false; 7620 7621 if (IS_MOBILE(dev_priv)) 7622 intel_lvds_init(dev_priv); 7623 7624 intel_crt_init(dev_priv); 7625 7626 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 7627 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n"); 7628 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 7629 if (!found && IS_G4X(dev_priv)) { 7630 drm_dbg_kms(&dev_priv->drm, 7631 "probing HDMI on SDVOB\n"); 7632 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 7633 } 7634 7635 if (!found && IS_G4X(dev_priv)) 7636 g4x_dp_init(dev_priv, DP_B, PORT_B); 7637 } 7638 7639 /* Before G4X SDVOC doesn't have its own detect register */ 7640 7641 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 7642 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n"); 7643 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 7644 } 7645 7646 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) { 7647 7648 if (IS_G4X(dev_priv)) { 7649 drm_dbg_kms(&dev_priv->drm, 7650 "probing HDMI on SDVOC\n"); 7651 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 7652 } 7653 if (IS_G4X(dev_priv)) 7654 g4x_dp_init(dev_priv, DP_C, PORT_C); 7655 } 7656 7657 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED)) 7658 g4x_dp_init(dev_priv, DP_D, PORT_D); 7659 7660 if (SUPPORTS_TV(dev_priv)) 7661 intel_tv_init(dev_priv); 7662 } else if (DISPLAY_VER(dev_priv) == 2) { 7663 if (IS_I85X(dev_priv)) 7664 intel_lvds_init(dev_priv); 7665 7666 intel_crt_init(dev_priv); 7667 intel_dvo_init(dev_priv); 7668 } 7669 7670 for_each_intel_encoder(&dev_priv->drm, encoder) { 7671 encoder->base.possible_crtcs = 7672 intel_encoder_possible_crtcs(encoder); 7673 encoder->base.possible_clones = 7674 intel_encoder_possible_clones(encoder); 7675 } 7676 7677 intel_init_pch_refclk(dev_priv); 7678 7679 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 7680 } 7681 7682 static int max_dotclock(struct drm_i915_private *i915) 7683 { 7684 int max_dotclock = i915->max_dotclk_freq; 7685 7686 /* icl+ might use bigjoiner */ 7687 if (DISPLAY_VER(i915) >= 11) 7688 max_dotclock *= 2; 7689 7690 return max_dotclock; 7691 } 7692 7693 enum drm_mode_status 
intel_mode_valid(struct drm_device *dev, 7694 const struct drm_display_mode *mode) 7695 { 7696 struct drm_i915_private *dev_priv = to_i915(dev); 7697 int hdisplay_max, htotal_max; 7698 int vdisplay_max, vtotal_max; 7699 7700 /* 7701 * Can't reject DBLSCAN here because Xorg ddxen can add piles 7702 * of DBLSCAN modes to the output's mode list when they detect 7703 * the scaling mode property on the connector. And they don't 7704 * ask the kernel to validate those modes in any way until 7705 * modeset time at which point the client gets a protocol error. 7706 * So in order to not upset those clients we silently ignore the 7707 * DBLSCAN flag on such connectors. For other connectors we will 7708 * reject modes with the DBLSCAN flag in encoder->compute_config(). 7709 * And we always reject DBLSCAN modes in connector->mode_valid() 7710 * as we never want such modes on the connector's mode list. 7711 */ 7712 7713 if (mode->vscan > 1) 7714 return MODE_NO_VSCAN; 7715 7716 if (mode->flags & DRM_MODE_FLAG_HSKEW) 7717 return MODE_H_ILLEGAL; 7718 7719 if (mode->flags & (DRM_MODE_FLAG_CSYNC | 7720 DRM_MODE_FLAG_NCSYNC | 7721 DRM_MODE_FLAG_PCSYNC)) 7722 return MODE_HSYNC; 7723 7724 if (mode->flags & (DRM_MODE_FLAG_BCAST | 7725 DRM_MODE_FLAG_PIXMUX | 7726 DRM_MODE_FLAG_CLKDIV2)) 7727 return MODE_BAD; 7728 7729 /* 7730 * Reject clearly excessive dotclocks early to 7731 * avoid having to worry about huge integers later. 7732 */ 7733 if (mode->clock > max_dotclock(dev_priv)) 7734 return MODE_CLOCK_HIGH; 7735 7736 /* Transcoder timing limits */ 7737 if (DISPLAY_VER(dev_priv) >= 11) { 7738 hdisplay_max = 16384; 7739 vdisplay_max = 8192; 7740 htotal_max = 16384; 7741 vtotal_max = 8192; 7742 } else if (DISPLAY_VER(dev_priv) >= 9 || 7743 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 7744 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ 7745 vdisplay_max = 4096; 7746 htotal_max = 8192; 7747 vtotal_max = 8192; 7748 } else if (DISPLAY_VER(dev_priv) >= 3) { 7749 hdisplay_max = 4096; 7750 vdisplay_max = 4096; 7751 htotal_max = 8192; 7752 vtotal_max = 8192; 7753 } else { 7754 hdisplay_max = 2048; 7755 vdisplay_max = 2048; 7756 htotal_max = 4096; 7757 vtotal_max = 4096; 7758 } 7759 7760 if (mode->hdisplay > hdisplay_max || 7761 mode->hsync_start > htotal_max || 7762 mode->hsync_end > htotal_max || 7763 mode->htotal > htotal_max) 7764 return MODE_H_ILLEGAL; 7765 7766 if (mode->vdisplay > vdisplay_max || 7767 mode->vsync_start > vtotal_max || 7768 mode->vsync_end > vtotal_max || 7769 mode->vtotal > vtotal_max) 7770 return MODE_V_ILLEGAL; 7771 7772 return MODE_OK; 7773 } 7774 7775 enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv, 7776 const struct drm_display_mode *mode) 7777 { 7778 /* 7779 * Additional transcoder timing limits, 7780 * excluding BXT/GLK DSI transcoders. 7781 */ 7782 if (DISPLAY_VER(dev_priv) >= 5) { 7783 if (mode->hdisplay < 64 || 7784 mode->htotal - mode->hdisplay < 32) 7785 return MODE_H_ILLEGAL; 7786 7787 if (mode->vtotal - mode->vdisplay < 5) 7788 return MODE_V_ILLEGAL; 7789 } else { 7790 if (mode->htotal - mode->hdisplay < 32) 7791 return MODE_H_ILLEGAL; 7792 7793 if (mode->vtotal - mode->vdisplay < 3) 7794 return MODE_V_ILLEGAL; 7795 } 7796 7797 /* 7798 * Cantiga+ cannot handle modes with a hsync front porch of 0. 7799 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 
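 * (i.e. a zero-width horizontal front porch: hsync_start == hdisplay
 * would start the sync pulse on the very first pixel after the active
 * region, which is the case rejected just below.)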
7800 */ 7801 if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) && 7802 mode->hsync_start == mode->hdisplay) 7803 return MODE_H_ILLEGAL; 7804 7805 return MODE_OK; 7806 } 7807 7808 enum drm_mode_status 7809 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, 7810 const struct drm_display_mode *mode, 7811 bool bigjoiner) 7812 { 7813 int plane_width_max, plane_height_max; 7814 7815 /* 7816 * intel_mode_valid() should be 7817 * sufficient on older platforms. 7818 */ 7819 if (DISPLAY_VER(dev_priv) < 9) 7820 return MODE_OK; 7821 7822 /* 7823 * Most people will probably want a fullscreen 7824 * plane, so let's not advertise modes that are 7825 * too big for that. 7826 */ 7827 if (DISPLAY_VER(dev_priv) >= 11) { 7828 plane_width_max = 5120 << bigjoiner; 7829 plane_height_max = 4320; 7830 } else { 7831 plane_width_max = 5120; 7832 plane_height_max = 4096; 7833 } 7834 7835 if (mode->hdisplay > plane_width_max) 7836 return MODE_H_ILLEGAL; 7837 7838 if (mode->vdisplay > plane_height_max) 7839 return MODE_V_ILLEGAL; 7840 7841 return MODE_OK; 7842 } 7843 7844 static const struct intel_display_funcs skl_display_funcs = { 7845 .get_pipe_config = hsw_get_pipe_config, 7846 .crtc_enable = hsw_crtc_enable, 7847 .crtc_disable = hsw_crtc_disable, 7848 .commit_modeset_enables = skl_commit_modeset_enables, 7849 .get_initial_plane_config = skl_get_initial_plane_config, 7850 .fixup_initial_plane_config = skl_fixup_initial_plane_config, 7851 }; 7852 7853 static const struct intel_display_funcs ddi_display_funcs = { 7854 .get_pipe_config = hsw_get_pipe_config, 7855 .crtc_enable = hsw_crtc_enable, 7856 .crtc_disable = hsw_crtc_disable, 7857 .commit_modeset_enables = intel_commit_modeset_enables, 7858 .get_initial_plane_config = i9xx_get_initial_plane_config, 7859 .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 7860 }; 7861 7862 static const struct intel_display_funcs pch_split_display_funcs = { 7863 .get_pipe_config = ilk_get_pipe_config, 7864 .crtc_enable = ilk_crtc_enable, 7865 .crtc_disable = ilk_crtc_disable, 7866 .commit_modeset_enables = intel_commit_modeset_enables, 7867 .get_initial_plane_config = i9xx_get_initial_plane_config, 7868 .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 7869 }; 7870 7871 static const struct intel_display_funcs vlv_display_funcs = { 7872 .get_pipe_config = i9xx_get_pipe_config, 7873 .crtc_enable = valleyview_crtc_enable, 7874 .crtc_disable = i9xx_crtc_disable, 7875 .commit_modeset_enables = intel_commit_modeset_enables, 7876 .get_initial_plane_config = i9xx_get_initial_plane_config, 7877 .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 7878 }; 7879 7880 static const struct intel_display_funcs i9xx_display_funcs = { 7881 .get_pipe_config = i9xx_get_pipe_config, 7882 .crtc_enable = i9xx_crtc_enable, 7883 .crtc_disable = i9xx_crtc_disable, 7884 .commit_modeset_enables = intel_commit_modeset_enables, 7885 .get_initial_plane_config = i9xx_get_initial_plane_config, 7886 .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 7887 }; 7888 7889 /** 7890 * intel_init_display_hooks - initialize the display modesetting hooks 7891 * @dev_priv: device private 7892 */ 7893 void intel_init_display_hooks(struct drm_i915_private *dev_priv) 7894 { 7895 if (DISPLAY_VER(dev_priv) >= 9) { 7896 dev_priv->display.funcs.display = &skl_display_funcs; 7897 } else if (HAS_DDI(dev_priv)) { 7898 dev_priv->display.funcs.display = &ddi_display_funcs; 7899 } else if (HAS_PCH_SPLIT(dev_priv)) { 7900 dev_priv->display.funcs.display =
&pch_split_display_funcs; 7901 } else if (IS_CHERRYVIEW(dev_priv) || 7902 IS_VALLEYVIEW(dev_priv)) { 7903 dev_priv->display.funcs.display = &vlv_display_funcs; 7904 } else { 7905 dev_priv->display.funcs.display = &i9xx_display_funcs; 7906 } 7907 } 7908 7909 int intel_initial_commit(struct drm_device *dev) 7910 { 7911 struct drm_atomic_state *state = NULL; 7912 struct drm_modeset_acquire_ctx ctx; 7913 struct intel_crtc *crtc; 7914 int ret = 0; 7915 7916 state = drm_atomic_state_alloc(dev); 7917 if (!state) 7918 return -ENOMEM; 7919 7920 drm_modeset_acquire_init(&ctx, 0); 7921 7922 state->acquire_ctx = &ctx; 7923 to_intel_atomic_state(state)->internal = true; 7924 7925 retry: 7926 for_each_intel_crtc(dev, crtc) { 7927 struct intel_crtc_state *crtc_state = 7928 intel_atomic_get_crtc_state(state, crtc); 7929 7930 if (IS_ERR(crtc_state)) { 7931 ret = PTR_ERR(crtc_state); 7932 goto out; 7933 } 7934 7935 if (crtc_state->hw.active) { 7936 struct intel_encoder *encoder; 7937 7938 ret = drm_atomic_add_affected_planes(state, &crtc->base); 7939 if (ret) 7940 goto out; 7941 7942 /* 7943 * FIXME hack to force a LUT update to avoid the 7944 * plane update forcing the pipe gamma on without 7945 * having a proper LUT loaded. Remove once we 7946 * have readout for pipe gamma enable. 7947 */ 7948 crtc_state->uapi.color_mgmt_changed = true; 7949 7950 for_each_intel_encoder_mask(dev, encoder, 7951 crtc_state->uapi.encoder_mask) { 7952 if (encoder->initial_fastset_check && 7953 !encoder->initial_fastset_check(encoder, crtc_state)) { 7954 ret = drm_atomic_add_affected_connectors(state, 7955 &crtc->base); 7956 if (ret) 7957 goto out; 7958 } 7959 } 7960 } 7961 } 7962 7963 ret = drm_atomic_commit(state); 7964 7965 out: 7966 if (ret == -EDEADLK) { 7967 drm_atomic_state_clear(state); 7968 drm_modeset_backoff(&ctx); 7969 goto retry; 7970 } 7971 7972 drm_atomic_state_put(state); 7973 7974 drm_modeset_drop_locks(&ctx); 7975 drm_modeset_acquire_fini(&ctx); 7976 7977 return ret; 7978 } 7979 7980 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 7981 { 7982 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); 7983 enum transcoder cpu_transcoder = (enum transcoder)pipe; 7984 /* 640x480@60Hz, ~25175 kHz */ 7985 struct dpll clock = { 7986 .m1 = 18, 7987 .m2 = 7, 7988 .p1 = 13, 7989 .p2 = 4, 7990 .n = 2, 7991 }; 7992 u32 dpll, fp; 7993 int i; 7994 7995 drm_WARN_ON(&dev_priv->drm, 7996 i9xx_calc_dpll_params(48000, &clock) != 25154); 7997 7998 drm_dbg_kms(&dev_priv->drm, 7999 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n", 8000 pipe_name(pipe), clock.vco, clock.dot); 8001 8002 fp = i9xx_dpll_compute_fp(&clock); 8003 dpll = DPLL_DVO_2X_MODE | 8004 DPLL_VGA_MODE_DIS | 8005 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | 8006 PLL_P2_DIVIDE_BY_4 | 8007 PLL_REF_INPUT_DREFCLK | 8008 DPLL_VCO_ENABLE; 8009 8010 intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder), 8011 HACTIVE(640 - 1) | HTOTAL(800 - 1)); 8012 intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder), 8013 HBLANK_START(640 - 1) | HBLANK_END(800 - 1)); 8014 intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder), 8015 HSYNC_START(656 - 1) | HSYNC_END(752 - 1)); 8016 intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder), 8017 VACTIVE(480 - 1) | VTOTAL(525 - 1)); 8018 intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), 8019 VBLANK_START(480 - 1) | VBLANK_END(525 - 1)); 8020 intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder), 8021 VSYNC_START(490 - 1) | VSYNC_END(492 - 1)); 8022 intel_de_write(dev_priv, PIPESRC(pipe), 8023 
PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1)); 8024 8025 intel_de_write(dev_priv, FP0(pipe), fp); 8026 intel_de_write(dev_priv, FP1(pipe), fp); 8027 8028 /* 8029 * Apparently we need to have VGA mode enabled prior to changing 8030 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 8031 * dividers, even though the register value does change. 8032 */ 8033 intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); 8034 intel_de_write(dev_priv, DPLL(pipe), dpll); 8035 8036 /* Wait for the clocks to stabilize. */ 8037 intel_de_posting_read(dev_priv, DPLL(pipe)); 8038 udelay(150); 8039 8040 /* The pixel multiplier can only be updated once the 8041 * DPLL is enabled and the clocks are stable. 8042 * 8043 * So write it again. 8044 */ 8045 intel_de_write(dev_priv, DPLL(pipe), dpll); 8046 8047 /* We do this three times for luck */ 8048 for (i = 0; i < 3 ; i++) { 8049 intel_de_write(dev_priv, DPLL(pipe), dpll); 8050 intel_de_posting_read(dev_priv, DPLL(pipe)); 8051 udelay(150); /* wait for warmup */ 8052 } 8053 8054 intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE); 8055 intel_de_posting_read(dev_priv, TRANSCONF(pipe)); 8056 8057 intel_wait_for_pipe_scanline_moving(crtc); 8058 } 8059 8060 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 8061 { 8062 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); 8063 8064 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n", 8065 pipe_name(pipe)); 8066 8067 drm_WARN_ON(&dev_priv->drm, 8068 intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE); 8069 drm_WARN_ON(&dev_priv->drm, 8070 intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE); 8071 drm_WARN_ON(&dev_priv->drm, 8072 intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE); 8073 drm_WARN_ON(&dev_priv->drm, 8074 intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK); 8075 drm_WARN_ON(&dev_priv->drm, 8076 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK); 8077 8078 intel_de_write(dev_priv, TRANSCONF(pipe), 0); 8079 intel_de_posting_read(dev_priv, TRANSCONF(pipe)); 8080 8081 intel_wait_for_pipe_scanline_stopped(crtc); 8082 8083 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); 8084 intel_de_posting_read(dev_priv, DPLL(pipe)); 8085 } 8086 8087 void intel_hpd_poll_fini(struct drm_i915_private *i915) 8088 { 8089 struct intel_connector *connector; 8090 struct drm_connector_list_iter conn_iter; 8091 8092 /* Kill all the work that may have been queued by hpd. */ 8093 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 8094 for_each_intel_connector_iter(connector, &conn_iter) { 8095 if (connector->modeset_retry_work.func && 8096 cancel_work_sync(&connector->modeset_retry_work)) 8097 drm_connector_put(&connector->base); 8098 if (connector->hdcp.shim) { 8099 cancel_delayed_work_sync(&connector->hdcp.check_work); 8100 cancel_work_sync(&connector->hdcp.prop_work); 8101 } 8102 } 8103 drm_connector_list_iter_end(&conn_iter); 8104 } 8105 8106 bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915) 8107 { 8108 return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915); 8109 } 8110
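/*
 * (A behavioural note on intel_scanout_needs_vtd_wa() above, assumed from
 * the VT-d workaround's known symptoms rather than derived from this file:
 * with the IOMMU active the display engine may prefetch past the end of
 * the scanout surface, so callers pad scanout-capable GGTT mappings with
 * guard pages whenever this returns true.)
 */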