1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 */ 26 27 #include <linux/i2c.h> 28 #include <linux/input.h> 29 #include <linux/intel-iommu.h> 30 #include <linux/kernel.h> 31 #include <linux/module.h> 32 #include <linux/dma-resv.h> 33 #include <linux/slab.h> 34 35 #include <drm/drm_atomic.h> 36 #include <drm/drm_atomic_helper.h> 37 #include <drm/drm_atomic_uapi.h> 38 #include <drm/drm_damage_helper.h> 39 #include <drm/drm_dp_helper.h> 40 #include <drm/drm_edid.h> 41 #include <drm/drm_fourcc.h> 42 #include <drm/drm_plane_helper.h> 43 #include <drm/drm_probe_helper.h> 44 #include <drm/drm_rect.h> 45 46 #include "display/intel_crt.h" 47 #include "display/intel_ddi.h" 48 #include "display/intel_display_debugfs.h" 49 #include "display/intel_dp.h" 50 #include "display/intel_dp_mst.h" 51 #include "display/intel_dpll.h" 52 #include "display/intel_dpll_mgr.h" 53 #include "display/intel_dsi.h" 54 #include "display/intel_dvo.h" 55 #include "display/intel_gmbus.h" 56 #include "display/intel_hdmi.h" 57 #include "display/intel_lvds.h" 58 #include "display/intel_sdvo.h" 59 #include "display/intel_tv.h" 60 #include "display/intel_vdsc.h" 61 #include "display/intel_vrr.h" 62 63 #include "gem/i915_gem_object.h" 64 65 #include "gt/intel_rps.h" 66 67 #include "i915_drv.h" 68 #include "i915_trace.h" 69 #include "intel_acpi.h" 70 #include "intel_atomic.h" 71 #include "intel_atomic_plane.h" 72 #include "intel_bw.h" 73 #include "intel_cdclk.h" 74 #include "intel_color.h" 75 #include "intel_crtc.h" 76 #include "intel_csr.h" 77 #include "intel_display_types.h" 78 #include "intel_dp_link_training.h" 79 #include "intel_fbc.h" 80 #include "intel_fdi.h" 81 #include "intel_fbdev.h" 82 #include "intel_fifo_underrun.h" 83 #include "intel_frontbuffer.h" 84 #include "intel_hdcp.h" 85 #include "intel_hotplug.h" 86 #include "intel_overlay.h" 87 #include "intel_pipe_crc.h" 88 #include "intel_pm.h" 89 #include "intel_pps.h" 90 #include "intel_psr.h" 91 #include "intel_quirks.h" 92 #include 
"intel_sideband.h" 93 #include "intel_sprite.h" 94 #include "intel_tc.h" 95 #include "intel_vga.h" 96 #include "i9xx_plane.h" 97 98 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 99 struct intel_crtc_state *pipe_config); 100 static void ilk_pch_clock_get(struct intel_crtc *crtc, 101 struct intel_crtc_state *pipe_config); 102 103 static int intel_framebuffer_init(struct intel_framebuffer *ifb, 104 struct drm_i915_gem_object *obj, 105 struct drm_mode_fb_cmd2 *mode_cmd); 106 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); 107 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); 108 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 109 const struct intel_link_m_n *m_n, 110 const struct intel_link_m_n *m2_n2); 111 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); 112 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state); 113 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state); 114 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state); 115 static void vlv_prepare_pll(struct intel_crtc *crtc, 116 const struct intel_crtc_state *pipe_config); 117 static void chv_prepare_pll(struct intel_crtc *crtc, 118 const struct intel_crtc_state *pipe_config); 119 static void skl_pfit_enable(const struct intel_crtc_state *crtc_state); 120 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); 121 static void intel_modeset_setup_hw_state(struct drm_device *dev, 122 struct drm_modeset_acquire_ctx *ctx); 123 124 /* returns HPLL frequency in kHz */ 125 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) 126 { 127 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 128 129 /* Obtain SKU information */ 130 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 131 CCK_FUSE_HPLL_FREQ_MASK; 132 133 return vco_freq[hpll_freq] * 1000; 134 } 135 136 int vlv_get_cck_clock(struct drm_i915_private 
*dev_priv, 137 const char *name, u32 reg, int ref_freq) 138 { 139 u32 val; 140 int divider; 141 142 val = vlv_cck_read(dev_priv, reg); 143 divider = val & CCK_FREQUENCY_VALUES; 144 145 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) != 146 (divider << CCK_FREQUENCY_STATUS_SHIFT), 147 "%s change in progress\n", name); 148 149 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); 150 } 151 152 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 153 const char *name, u32 reg) 154 { 155 int hpll; 156 157 vlv_cck_get(dev_priv); 158 159 if (dev_priv->hpll_freq == 0) 160 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv); 161 162 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq); 163 164 vlv_cck_put(dev_priv); 165 166 return hpll; 167 } 168 169 static void intel_update_czclk(struct drm_i915_private *dev_priv) 170 { 171 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) 172 return; 173 174 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", 175 CCK_CZ_CLOCK_CONTROL); 176 177 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n", 178 dev_priv->czclk_freq); 179 } 180 181 /* WA Display #0827: Gen9:all */ 182 static void 183 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) 184 { 185 if (enable) 186 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), 187 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS); 188 else 189 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), 190 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS)); 191 } 192 193 /* Wa_2006604312:icl,ehl */ 194 static void 195 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 196 bool enable) 197 { 198 if (enable) 199 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), 200 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS); 201 else 202 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), 203 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & 
~DPFR_GATING_DIS); 204 } 205 206 static bool 207 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state) 208 { 209 return crtc_state->master_transcoder != INVALID_TRANSCODER; 210 } 211 212 static bool 213 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state) 214 { 215 return crtc_state->sync_mode_slaves_mask != 0; 216 } 217 218 bool 219 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) 220 { 221 return is_trans_port_sync_master(crtc_state) || 222 is_trans_port_sync_slave(crtc_state); 223 } 224 225 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, 226 enum pipe pipe) 227 { 228 i915_reg_t reg = PIPEDSL(pipe); 229 u32 line1, line2; 230 u32 line_mask; 231 232 if (IS_GEN(dev_priv, 2)) 233 line_mask = DSL_LINEMASK_GEN2; 234 else 235 line_mask = DSL_LINEMASK_GEN3; 236 237 line1 = intel_de_read(dev_priv, reg) & line_mask; 238 msleep(5); 239 line2 = intel_de_read(dev_priv, reg) & line_mask; 240 241 return line1 != line2; 242 } 243 244 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state) 245 { 246 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 247 enum pipe pipe = crtc->pipe; 248 249 /* Wait for the display line to settle/start moving */ 250 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100)) 251 drm_err(&dev_priv->drm, 252 "pipe %c scanline %s wait timed out\n", 253 pipe_name(pipe), onoff(state)); 254 } 255 256 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc) 257 { 258 wait_for_pipe_scanline_moving(crtc, false); 259 } 260 261 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc) 262 { 263 wait_for_pipe_scanline_moving(crtc, true); 264 } 265 266 static void 267 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) 268 { 269 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 270 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 271 272 if (INTEL_GEN(dev_priv) >= 4) { 
273 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 274 i915_reg_t reg = PIPECONF(cpu_transcoder); 275 276 /* Wait for the Pipe State to go off */ 277 if (intel_de_wait_for_clear(dev_priv, reg, 278 I965_PIPECONF_ACTIVE, 100)) 279 drm_WARN(&dev_priv->drm, 1, 280 "pipe_off wait timed out\n"); 281 } else { 282 intel_wait_for_pipe_scanline_stopped(crtc); 283 } 284 } 285 286 /* Only for pre-ILK configs */ 287 void assert_pll(struct drm_i915_private *dev_priv, 288 enum pipe pipe, bool state) 289 { 290 u32 val; 291 bool cur_state; 292 293 val = intel_de_read(dev_priv, DPLL(pipe)); 294 cur_state = !!(val & DPLL_VCO_ENABLE); 295 I915_STATE_WARN(cur_state != state, 296 "PLL state assertion failure (expected %s, current %s)\n", 297 onoff(state), onoff(cur_state)); 298 } 299 300 /* XXX: the dsi pll is shared between MIPI DSI ports */ 301 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) 302 { 303 u32 val; 304 bool cur_state; 305 306 vlv_cck_get(dev_priv); 307 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); 308 vlv_cck_put(dev_priv); 309 310 cur_state = val & DSI_PLL_VCO_EN; 311 I915_STATE_WARN(cur_state != state, 312 "DSI PLL state assertion failure (expected %s, current %s)\n", 313 onoff(state), onoff(cur_state)); 314 } 315 316 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 317 enum pipe pipe, bool state) 318 { 319 bool cur_state; 320 321 if (HAS_DDI(dev_priv)) { 322 /* 323 * DDI does not have a specific FDI_TX register. 324 * 325 * FDI is never fed from EDP transcoder 326 * so pipe->transcoder cast is fine here. 
327 */ 328 enum transcoder cpu_transcoder = (enum transcoder)pipe; 329 u32 val = intel_de_read(dev_priv, 330 TRANS_DDI_FUNC_CTL(cpu_transcoder)); 331 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); 332 } else { 333 u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe)); 334 cur_state = !!(val & FDI_TX_ENABLE); 335 } 336 I915_STATE_WARN(cur_state != state, 337 "FDI TX state assertion failure (expected %s, current %s)\n", 338 onoff(state), onoff(cur_state)); 339 } 340 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) 341 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) 342 343 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 344 enum pipe pipe, bool state) 345 { 346 u32 val; 347 bool cur_state; 348 349 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe)); 350 cur_state = !!(val & FDI_RX_ENABLE); 351 I915_STATE_WARN(cur_state != state, 352 "FDI RX state assertion failure (expected %s, current %s)\n", 353 onoff(state), onoff(cur_state)); 354 } 355 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) 356 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) 357 358 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 359 enum pipe pipe) 360 { 361 u32 val; 362 363 /* ILK FDI PLL is always enabled */ 364 if (IS_GEN(dev_priv, 5)) 365 return; 366 367 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 368 if (HAS_DDI(dev_priv)) 369 return; 370 371 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe)); 372 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 373 } 374 375 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, 376 enum pipe pipe, bool state) 377 { 378 u32 val; 379 bool cur_state; 380 381 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe)); 382 cur_state = !!(val & FDI_RX_PLL_ENABLE); 383 I915_STATE_WARN(cur_state != state, 384 "FDI RX PLL assertion failure (expected %s, current %s)\n", 385 onoff(state), onoff(cur_state)); 386 } 
387 388 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) 389 { 390 i915_reg_t pp_reg; 391 u32 val; 392 enum pipe panel_pipe = INVALID_PIPE; 393 bool locked = true; 394 395 if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv))) 396 return; 397 398 if (HAS_PCH_SPLIT(dev_priv)) { 399 u32 port_sel; 400 401 pp_reg = PP_CONTROL(0); 402 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; 403 404 switch (port_sel) { 405 case PANEL_PORT_SELECT_LVDS: 406 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe); 407 break; 408 case PANEL_PORT_SELECT_DPA: 409 intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe); 410 break; 411 case PANEL_PORT_SELECT_DPC: 412 intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe); 413 break; 414 case PANEL_PORT_SELECT_DPD: 415 intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe); 416 break; 417 default: 418 MISSING_CASE(port_sel); 419 break; 420 } 421 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 422 /* presumably write lock depends on pipe, not port select */ 423 pp_reg = PP_CONTROL(pipe); 424 panel_pipe = pipe; 425 } else { 426 u32 port_sel; 427 428 pp_reg = PP_CONTROL(0); 429 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; 430 431 drm_WARN_ON(&dev_priv->drm, 432 port_sel != PANEL_PORT_SELECT_LVDS); 433 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe); 434 } 435 436 val = intel_de_read(dev_priv, pp_reg); 437 if (!(val & PANEL_POWER_ON) || 438 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) 439 locked = false; 440 441 I915_STATE_WARN(panel_pipe == pipe && locked, 442 "panel assertion failure, pipe %c regs locked\n", 443 pipe_name(pipe)); 444 } 445 446 void assert_pipe(struct drm_i915_private *dev_priv, 447 enum transcoder cpu_transcoder, bool state) 448 { 449 bool cur_state; 450 enum intel_display_power_domain power_domain; 451 intel_wakeref_t wakeref; 452 453 /* we keep both pipes enabled on 830 */ 454 if 
(IS_I830(dev_priv)) 455 state = true; 456 457 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 458 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 459 if (wakeref) { 460 u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); 461 cur_state = !!(val & PIPECONF_ENABLE); 462 463 intel_display_power_put(dev_priv, power_domain, wakeref); 464 } else { 465 cur_state = false; 466 } 467 468 I915_STATE_WARN(cur_state != state, 469 "transcoder %s assertion failure (expected %s, current %s)\n", 470 transcoder_name(cpu_transcoder), 471 onoff(state), onoff(cur_state)); 472 } 473 474 static void assert_plane(struct intel_plane *plane, bool state) 475 { 476 enum pipe pipe; 477 bool cur_state; 478 479 cur_state = plane->get_hw_state(plane, &pipe); 480 481 I915_STATE_WARN(cur_state != state, 482 "%s assertion failure (expected %s, current %s)\n", 483 plane->base.name, onoff(state), onoff(cur_state)); 484 } 485 486 #define assert_plane_enabled(p) assert_plane(p, true) 487 #define assert_plane_disabled(p) assert_plane(p, false) 488 489 static void assert_planes_disabled(struct intel_crtc *crtc) 490 { 491 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 492 struct intel_plane *plane; 493 494 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) 495 assert_plane_disabled(plane); 496 } 497 498 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, 499 enum pipe pipe) 500 { 501 u32 val; 502 bool enabled; 503 504 val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); 505 enabled = !!(val & TRANS_ENABLE); 506 I915_STATE_WARN(enabled, 507 "transcoder assertion failed, should be off on pipe %c but is still active\n", 508 pipe_name(pipe)); 509 } 510 511 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 512 enum pipe pipe, enum port port, 513 i915_reg_t dp_reg) 514 { 515 enum pipe port_pipe; 516 bool state; 517 518 state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); 519 520 I915_STATE_WARN(state && 
port_pipe == pipe, 521 "PCH DP %c enabled on transcoder %c, should be disabled\n", 522 port_name(port), pipe_name(pipe)); 523 524 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, 525 "IBX PCH DP %c still using transcoder B\n", 526 port_name(port)); 527 } 528 529 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 530 enum pipe pipe, enum port port, 531 i915_reg_t hdmi_reg) 532 { 533 enum pipe port_pipe; 534 bool state; 535 536 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); 537 538 I915_STATE_WARN(state && port_pipe == pipe, 539 "PCH HDMI %c enabled on transcoder %c, should be disabled\n", 540 port_name(port), pipe_name(pipe)); 541 542 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, 543 "IBX PCH HDMI %c still using transcoder B\n", 544 port_name(port)); 545 } 546 547 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 548 enum pipe pipe) 549 { 550 enum pipe port_pipe; 551 552 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); 553 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); 554 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); 555 556 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && 557 port_pipe == pipe, 558 "PCH VGA enabled on transcoder %c, should be disabled\n", 559 pipe_name(pipe)); 560 561 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && 562 port_pipe == pipe, 563 "PCH LVDS enabled on transcoder %c, should be disabled\n", 564 pipe_name(pipe)); 565 566 /* PCH SDVOB multiplex with HDMIB */ 567 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); 568 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); 569 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); 570 } 571 572 static void _vlv_enable_pll(struct intel_crtc *crtc, 573 const struct intel_crtc_state *pipe_config) 574 { 575 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 576 enum pipe 
pipe = crtc->pipe; 577 578 intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll); 579 intel_de_posting_read(dev_priv, DPLL(pipe)); 580 udelay(150); 581 582 if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) 583 drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe); 584 } 585 586 static void vlv_enable_pll(struct intel_crtc *crtc, 587 const struct intel_crtc_state *pipe_config) 588 { 589 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 590 enum pipe pipe = crtc->pipe; 591 592 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); 593 594 /* PLL is protected by panel, make sure we can write it */ 595 assert_panel_unlocked(dev_priv, pipe); 596 597 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) 598 _vlv_enable_pll(crtc, pipe_config); 599 600 intel_de_write(dev_priv, DPLL_MD(pipe), 601 pipe_config->dpll_hw_state.dpll_md); 602 intel_de_posting_read(dev_priv, DPLL_MD(pipe)); 603 } 604 605 606 static void _chv_enable_pll(struct intel_crtc *crtc, 607 const struct intel_crtc_state *pipe_config) 608 { 609 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 610 enum pipe pipe = crtc->pipe; 611 enum dpio_channel port = vlv_pipe_to_channel(pipe); 612 u32 tmp; 613 614 vlv_dpio_get(dev_priv); 615 616 /* Enable back the 10bit clock to display controller */ 617 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 618 tmp |= DPIO_DCLKP_EN; 619 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); 620 621 vlv_dpio_put(dev_priv); 622 623 /* 624 * Need to wait > 100ns between dclkp clock enable bit and PLL enable. 
625 */ 626 udelay(1); 627 628 /* Enable PLL */ 629 intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll); 630 631 /* Check PLL is locked */ 632 if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) 633 drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe); 634 } 635 636 static void chv_enable_pll(struct intel_crtc *crtc, 637 const struct intel_crtc_state *pipe_config) 638 { 639 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 640 enum pipe pipe = crtc->pipe; 641 642 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); 643 644 /* PLL is protected by panel, make sure we can write it */ 645 assert_panel_unlocked(dev_priv, pipe); 646 647 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) 648 _chv_enable_pll(crtc, pipe_config); 649 650 if (pipe != PIPE_A) { 651 /* 652 * WaPixelRepeatModeFixForC0:chv 653 * 654 * DPLLCMD is AWOL. Use chicken bits to propagate 655 * the value from DPLLBMD to either pipe B or C. 656 */ 657 intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe)); 658 intel_de_write(dev_priv, DPLL_MD(PIPE_B), 659 pipe_config->dpll_hw_state.dpll_md); 660 intel_de_write(dev_priv, CBR4_VLV, 0); 661 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md; 662 663 /* 664 * DPLLB VGA mode also seems to cause problems. 665 * We should always have it disabled. 
666 */ 667 drm_WARN_ON(&dev_priv->drm, 668 (intel_de_read(dev_priv, DPLL(PIPE_B)) & 669 DPLL_VGA_MODE_DIS) == 0); 670 } else { 671 intel_de_write(dev_priv, DPLL_MD(pipe), 672 pipe_config->dpll_hw_state.dpll_md); 673 intel_de_posting_read(dev_priv, DPLL_MD(pipe)); 674 } 675 } 676 677 static bool i9xx_has_pps(struct drm_i915_private *dev_priv) 678 { 679 if (IS_I830(dev_priv)) 680 return false; 681 682 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 683 } 684 685 static void i9xx_enable_pll(struct intel_crtc *crtc, 686 const struct intel_crtc_state *crtc_state) 687 { 688 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 689 i915_reg_t reg = DPLL(crtc->pipe); 690 u32 dpll = crtc_state->dpll_hw_state.dpll; 691 int i; 692 693 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); 694 695 /* PLL is protected by panel, make sure we can write it */ 696 if (i9xx_has_pps(dev_priv)) 697 assert_panel_unlocked(dev_priv, crtc->pipe); 698 699 /* 700 * Apparently we need to have VGA mode enabled prior to changing 701 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 702 * dividers, even though the register value does change. 703 */ 704 intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS); 705 intel_de_write(dev_priv, reg, dpll); 706 707 /* Wait for the clocks to stabilize. */ 708 intel_de_posting_read(dev_priv, reg); 709 udelay(150); 710 711 if (INTEL_GEN(dev_priv) >= 4) { 712 intel_de_write(dev_priv, DPLL_MD(crtc->pipe), 713 crtc_state->dpll_hw_state.dpll_md); 714 } else { 715 /* The pixel multiplier can only be updated once the 716 * DPLL is enabled and the clocks are stable. 717 * 718 * So write it again. 
719 */ 720 intel_de_write(dev_priv, reg, dpll); 721 } 722 723 /* We do this three times for luck */ 724 for (i = 0; i < 3; i++) { 725 intel_de_write(dev_priv, reg, dpll); 726 intel_de_posting_read(dev_priv, reg); 727 udelay(150); /* wait for warmup */ 728 } 729 } 730 731 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) 732 { 733 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 734 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 735 enum pipe pipe = crtc->pipe; 736 737 /* Don't disable pipe or pipe PLLs if needed */ 738 if (IS_I830(dev_priv)) 739 return; 740 741 /* Make sure the pipe isn't still relying on us */ 742 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); 743 744 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); 745 intel_de_posting_read(dev_priv, DPLL(pipe)); 746 } 747 748 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 749 { 750 u32 val; 751 752 /* Make sure the pipe isn't still relying on us */ 753 assert_pipe_disabled(dev_priv, (enum transcoder)pipe); 754 755 val = DPLL_INTEGRATED_REF_CLK_VLV | 756 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 757 if (pipe != PIPE_A) 758 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 759 760 intel_de_write(dev_priv, DPLL(pipe), val); 761 intel_de_posting_read(dev_priv, DPLL(pipe)); 762 } 763 764 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 765 { 766 enum dpio_channel port = vlv_pipe_to_channel(pipe); 767 u32 val; 768 769 /* Make sure the pipe isn't still relying on us */ 770 assert_pipe_disabled(dev_priv, (enum transcoder)pipe); 771 772 val = DPLL_SSC_REF_CLK_CHV | 773 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 774 if (pipe != PIPE_A) 775 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 776 777 intel_de_write(dev_priv, DPLL(pipe), val); 778 intel_de_posting_read(dev_priv, DPLL(pipe)); 779 780 vlv_dpio_get(dev_priv); 781 782 /* Disable 10bit clock to display controller */ 783 val = vlv_dpio_read(dev_priv, pipe, 
CHV_CMN_DW14(port)); 784 val &= ~DPIO_DCLKP_EN; 785 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); 786 787 vlv_dpio_put(dev_priv); 788 } 789 790 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 791 struct intel_digital_port *dig_port, 792 unsigned int expected_mask) 793 { 794 u32 port_mask; 795 i915_reg_t dpll_reg; 796 797 switch (dig_port->base.port) { 798 case PORT_B: 799 port_mask = DPLL_PORTB_READY_MASK; 800 dpll_reg = DPLL(0); 801 break; 802 case PORT_C: 803 port_mask = DPLL_PORTC_READY_MASK; 804 dpll_reg = DPLL(0); 805 expected_mask <<= 4; 806 break; 807 case PORT_D: 808 port_mask = DPLL_PORTD_READY_MASK; 809 dpll_reg = DPIO_PHY_STATUS; 810 break; 811 default: 812 BUG(); 813 } 814 815 if (intel_de_wait_for_register(dev_priv, dpll_reg, 816 port_mask, expected_mask, 1000)) 817 drm_WARN(&dev_priv->drm, 1, 818 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n", 819 dig_port->base.base.base.id, dig_port->base.base.name, 820 intel_de_read(dev_priv, dpll_reg) & port_mask, 821 expected_mask); 822 } 823 824 static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) 825 { 826 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 827 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 828 enum pipe pipe = crtc->pipe; 829 i915_reg_t reg; 830 u32 val, pipeconf_val; 831 832 /* Make sure PCH DPLL is enabled */ 833 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); 834 835 /* FDI must be feeding us bits for PCH ports */ 836 assert_fdi_tx_enabled(dev_priv, pipe); 837 assert_fdi_rx_enabled(dev_priv, pipe); 838 839 if (HAS_PCH_CPT(dev_priv)) { 840 reg = TRANS_CHICKEN2(pipe); 841 val = intel_de_read(dev_priv, reg); 842 /* 843 * Workaround: Set the timing override bit 844 * before enabling the pch transcoder. 
845 */ 846 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 847 /* Configure frame start delay to match the CPU */ 848 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 849 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 850 intel_de_write(dev_priv, reg, val); 851 } 852 853 reg = PCH_TRANSCONF(pipe); 854 val = intel_de_read(dev_priv, reg); 855 pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe)); 856 857 if (HAS_PCH_IBX(dev_priv)) { 858 /* Configure frame start delay to match the CPU */ 859 val &= ~TRANS_FRAME_START_DELAY_MASK; 860 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 861 862 /* 863 * Make the BPC in transcoder be consistent with 864 * that in pipeconf reg. For HDMI we must use 8bpc 865 * here for both 8bpc and 12bpc. 866 */ 867 val &= ~PIPECONF_BPC_MASK; 868 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 869 val |= PIPECONF_8BPC; 870 else 871 val |= pipeconf_val & PIPECONF_BPC_MASK; 872 } 873 874 val &= ~TRANS_INTERLACE_MASK; 875 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { 876 if (HAS_PCH_IBX(dev_priv) && 877 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 878 val |= TRANS_LEGACY_INTERLACED_ILK; 879 else 880 val |= TRANS_INTERLACED; 881 } else { 882 val |= TRANS_PROGRESSIVE; 883 } 884 885 intel_de_write(dev_priv, reg, val | TRANS_ENABLE); 886 if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100)) 887 drm_err(&dev_priv->drm, "failed to enable transcoder %c\n", 888 pipe_name(pipe)); 889 } 890 891 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 892 enum transcoder cpu_transcoder) 893 { 894 u32 val, pipeconf_val; 895 896 /* FDI must be feeding us bits for PCH ports */ 897 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); 898 assert_fdi_rx_enabled(dev_priv, PIPE_A); 899 900 val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); 901 /* Workaround: set timing override bit. 
*/ 902 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 903 /* Configure frame start delay to match the CPU */ 904 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 905 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 906 intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); 907 908 val = TRANS_ENABLE; 909 pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); 910 911 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 912 PIPECONF_INTERLACED_ILK) 913 val |= TRANS_INTERLACED; 914 else 915 val |= TRANS_PROGRESSIVE; 916 917 intel_de_write(dev_priv, LPT_TRANSCONF, val); 918 if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, 919 TRANS_STATE_ENABLE, 100)) 920 drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n"); 921 } 922 923 static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv, 924 enum pipe pipe) 925 { 926 i915_reg_t reg; 927 u32 val; 928 929 /* FDI relies on the transcoder */ 930 assert_fdi_tx_disabled(dev_priv, pipe); 931 assert_fdi_rx_disabled(dev_priv, pipe); 932 933 /* Ports must be off as well */ 934 assert_pch_ports_disabled(dev_priv, pipe); 935 936 reg = PCH_TRANSCONF(pipe); 937 val = intel_de_read(dev_priv, reg); 938 val &= ~TRANS_ENABLE; 939 intel_de_write(dev_priv, reg, val); 940 /* wait for PCH transcoder off, transcoder state */ 941 if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) 942 drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", 943 pipe_name(pipe)); 944 945 if (HAS_PCH_CPT(dev_priv)) { 946 /* Workaround: Clear the timing override chicken bit again. 
*/ 947 reg = TRANS_CHICKEN2(pipe); 948 val = intel_de_read(dev_priv, reg); 949 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 950 intel_de_write(dev_priv, reg, val); 951 } 952 } 953 954 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 955 { 956 u32 val; 957 958 val = intel_de_read(dev_priv, LPT_TRANSCONF); 959 val &= ~TRANS_ENABLE; 960 intel_de_write(dev_priv, LPT_TRANSCONF, val); 961 /* wait for PCH transcoder off, transcoder state */ 962 if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, 963 TRANS_STATE_ENABLE, 50)) 964 drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); 965 966 /* Workaround: clear timing override bit. */ 967 val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); 968 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 969 intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); 970 } 971 972 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) 973 { 974 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 975 976 if (HAS_PCH_LPT(dev_priv)) 977 return PIPE_A; 978 else 979 return crtc->pipe; 980 } 981 982 void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) 983 { 984 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 985 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 986 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 987 enum pipe pipe = crtc->pipe; 988 i915_reg_t reg; 989 u32 val; 990 991 drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe)); 992 993 assert_planes_disabled(crtc); 994 995 /* 996 * A pipe without a PLL won't actually be able to drive bits from 997 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 998 * need the check. 
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

/* Disable the pipe for @old_crtc_state; planes must already be disabled. */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

/* Tile size in bytes: 2048 on gen2, 4096 on everything newer. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	return IS_GEN(dev_priv, 2) ? 2048 : 4096;
}

/*
 * Is @plane a CCS (color control surface) plane of @fb? For CCS
 * modifiers the second half of fb's planes are the CCS planes.
 */
static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
{
	if (!is_ccs_modifier(fb->modifier))
		return false;

	return plane >= fb->format->num_planes / 2;
}

static bool is_gen12_ccs_modifier(u64 modifier)
{
	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
}

static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
{
	return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
}

/* Plane 2 of the gen12 RC_CCS_CC modifier carries the clear color value. */
static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
{
	return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
	       plane == 2;
}

/* Is @plane an auxiliary (CCS or, for non-CCS formats, plane 1) surface? */
static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
{
	if (is_ccs_modifier(fb->modifier))
		return is_ccs_plane(fb, plane);

	return plane == 1;
}

/* Map a main surface plane index to its corresponding CCS plane index. */
static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
{
	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
		    (main_plane && main_plane >= fb->format->num_planes / 2));

	return fb->format->num_planes / 2 + main_plane;
}

/* Map a CCS plane index back to its main surface plane index. */
static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
{
	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
		    ccs_plane < fb->format->num_planes / 2);

	/* The clear color plane maps to main plane 0 */
	if (is_gen12_ccs_cc_plane(fb, ccs_plane))
		return 0;

	return ccs_plane - fb->format->num_planes / 2;
}

/* Aux plane for @main_plane: CCS plane, UV plane (pre-gen11), or 0 (none). */
int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
{
	struct drm_i915_private *i915 = to_i915(fb->dev);

	if (is_ccs_modifier(fb->modifier))
		return main_to_ccs_plane(fb, main_plane);
	else if (INTEL_GEN(i915) < 11 &&
		 intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		return 1;
	else
		return 0;
}

/*
 * True for YUV formats with a separate UV plane: 2 planes normally,
 * 4 when a CCS modifier doubles the plane count with aux surfaces.
 */
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
				    u64 modifier)
{
	return info->is_yuv &&
	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
}

static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
				   int color_plane)
{
	return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
	       color_plane == 1;
}

/* Width of one tile in bytes for @color_plane, per modifier and platform. */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 64;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}

/* Tile height in rows; gen12 CCS planes are treated as one row tall. */
static unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
{
	if (is_gen12_ccs_plane(fb, color_plane))
		return 1;

	return intel_tile_size(to_i915(fb->dev)) /
		intel_tile_width_bytes(fb, color_plane);
}

/* Return the tile dimensions in pixel units */
static void intel_tile_dims(const struct drm_framebuffer *fb, int
			    color_plane,
			    unsigned int *tile_width,
			    unsigned int *tile_height)
{
	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
	unsigned int cpp = fb->format->cpp[color_plane];

	*tile_width = tile_width_bytes / cpp;
	*tile_height = intel_tile_height(fb, color_plane);
}

/* Size in bytes of one horizontal row of tiles for @color_plane. */
static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
					int color_plane)
{
	unsigned int tile_width, tile_height;

	intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

	return fb->pitches[color_plane] * tile_height;
}

/* Round @height up to a whole number of tiles for @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	unsigned int tile_height = intel_tile_height(fb, color_plane);

	return ALIGN(height, tile_height);
}

/* Sum of the per-plane areas (width * height) described by @rot_info. */
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].width * rot_info->plane[i].height;

	return size;
}

/* Sum of the per-plane areas (width * height) described by @rem_info. */
unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
		size += rem_info->plane[i].width * rem_info->plane[i].height;

	return size;
}

/* Select the GGTT view for @fb: rotated for 90/270 rotation, else normal. */
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
			const struct drm_framebuffer *fb,
			unsigned int rotation)
{
	view->type = I915_GGTT_VIEW_NORMAL;
	if (drm_rotation_90_or_270(rotation)) {
		view->type = I915_GGTT_VIEW_ROTATED;
		view->rotated = to_intel_framebuffer(fb)->rot_info;
	}
}

/* Surface alignment in bytes for cursor planes, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;
	else if (IS_I85X(dev_priv))
		return 256;
	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;
	else
		return 4 * 1024;
}

/* Surface alignment in bytes for linear framebuffers, per platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;
	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;
	else
		return 0;
}

static bool has_async_flips(struct drm_i915_private *i915)
{
	return INTEL_GEN(i915) >= 5;
}

/* Surface alignment in bytes for @color_plane of @fb, per modifier. */
unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
				  int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (has_async_flips(dev_priv))
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}

static bool intel_plane_uses_fence(const struct intel_plane_state
				   *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	/* Pre-gen4 always needs a fence; later gens only for FBC scanout. */
	return INTEL_GEN(dev_priv) < 4 ||
	       (plane->has_fbc &&
		plane_state->view.type == I915_GGTT_VIEW_NORMAL);
}

/*
 * Pin @fb's backing object for scanout with the given GGTT @view and,
 * if @uses_fence permits, install a fence register for it (setting
 * PLANE_HAS_FENCE in *@out_flags on success). Returns the pinned vma
 * with an extra reference taken, or an ERR_PTR on failure.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}

/* Undo intel_pin_and_fence_fb_obj(): drop fence (if any), unpin and put. */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
	i915_vma_put(vma);
}

/* Pitch of @color_plane, using the rotated pitch for 90/270 rotation. */
static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
			  unsigned int rotation)
{
	if (drm_rotation_90_or_270(rotation))
		return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
	else
		return fb->pitches[color_plane];
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->color_plane[color_plane].stride;

	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)

{
	*x += state->color_plane[color_plane].x;
	*y += state->color_plane[color_plane].y;
}

/*
 * Fold the tile-aligned difference (old_offset - new_offset) into the
 * x/y coordinates. Both offsets must be tile-size aligned and
 * new_offset <= old_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}

/* Linear addressing: LINEAR modifier, or a gen12 CCS plane. */
static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
{
	return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
	       is_gen12_ccs_plane(fb, color_plane);
}

/*
 * Move the difference between @old_offset and @new_offset into the x/y
 * offsets, handling both tiled and linear surfaces.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated pitch is the tile column height */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}

/*
 * Adjust the tile offset by moving the difference into
 * the x/y offsets.
 */
u32 intel_plane_adjust_aligned_offset(int *x, int *y,
				      const struct intel_plane_state *state,
				      int color_plane,
				      u32 old_offset, u32 new_offset)
{
	return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
					   state->hw.rotation,
					   state->color_plane[color_plane].stride,
					   old_offset, new_offset);
}

/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated pitch is the tile column height */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* reduce x/y to the intra-tile remainder */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;

		offset_aligned = offset;
		if (alignment)
			offset_aligned = rounddown(offset_aligned, alignment);

		/* fold the rounding remainder back into x/y */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset;
		if (alignment) {
			offset_aligned = rounddown(offset_aligned, alignment);
			*y = (offset % alignment) / pitch;
			*x = ((offset % alignment) - *y * pitch) / cpp;
		} else {
			*y = *x = 0;
		}
	}

	return offset_aligned;
}

/* As intel_compute_aligned_offset(), taking plane state and per-plane
 * alignment (cursor planes have their own alignment requirement).
 */
u32 intel_plane_compute_aligned_offset(int *x, int *y,
				       const struct intel_plane_state *state,
				       int color_plane)
{
	struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int rotation = state->hw.rotation;
	int pitch = state->color_plane[color_plane].stride;
	u32 alignment;

	if (intel_plane->id == PLANE_CURSOR)
		alignment = intel_cursor_alignment(dev_priv);
	else
		alignment = intel_surf_alignment(fb, color_plane);

	return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
					    pitch, rotation, alignment);
}

/* Convert the fb->offset[] into x/y offsets */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;
	u32 alignment;

	if (INTEL_GEN(dev_priv) >= 12 &&
	    is_semiplanar_uv_plane(fb, color_plane))
		alignment = intel_tile_row_size(fb, color_plane);
	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
		alignment = intel_tile_size(dev_priv);
	else
		alignment = 0;

	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
		drm_dbg_kms(&dev_priv->drm,
			    "Misaligned offset 0x%08x for color plane %d\n",
			    fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		drm_dbg_kms(&dev_priv->drm,
			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
			    fb->offsets[color_plane], fb->pitches[color_plane],
			    color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}

/* Map a framebuffer modifier to the matching object tiling mode. */
static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case I915_FORMAT_MOD_X_TILED:
		return
		       I915_TILING_X;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return I915_TILING_Y;
	default:
		return I915_TILING_NONE;
	}
}

/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 * the cache-line pairs. The compression state of the cache-line pair
 * is specified by 2 bits in the CCS. Each CCS cache-line represents
 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 * cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
static const struct drm_format_info skl_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};

/*
 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
 * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
 * the main surface.
 */
static const struct drm_format_info gen12_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};

/*
 * Same as gen12_ccs_formats[] above, but with additional surface used
 * to pass Clear Color information in plane 2 with 64 bits of data.
 */
static const struct drm_format_info gen12_ccs_cc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
};

/* Linear search of @formats for @format; NULL if not found. */
static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

/* Pick the driver-private format info for CCS modifiers; NULL otherwise. */
static const struct drm_format_info *
intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	switch (cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return lookup_format_info(skl_ccs_formats,
					  ARRAY_SIZE(skl_ccs_formats),
					  cmd->pixel_format);
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case
	     I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return lookup_format_info(gen12_ccs_formats,
					  ARRAY_SIZE(gen12_ccs_formats),
					  cmd->pixel_format);
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
		return lookup_format_info(gen12_ccs_cc_formats,
					  ARRAY_SIZE(gen12_ccs_cc_formats),
					  cmd->pixel_format);
	default:
		return NULL;
	}
}

/* True for every framebuffer modifier that carries a CCS aux surface. */
bool is_ccs_modifier(u64 modifier)
{
	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
	       modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
}

/* Aux stride for a gen12 CCS plane: 64 bytes per 512 bytes of main pitch. */
static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
{
	return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
			    512) * 64;
}

/* Max plane stride for the given format/modifier, from the primary plane. */
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all,
	 * if in case pipe A is disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_get_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

static
u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
			u32 pixel_format, u64 modifier)
{
	/*
	 * Arbitrary limit for gen4+ chosen to match the
	 * render engine max stride.
	 *
	 * The new CCS hash mode makes remapping impossible
	 */
	if (!is_ccs_modifier(modifier)) {
		if (INTEL_GEN(dev_priv) >= 7)
			return 256*1024;
		else if (INTEL_GEN(dev_priv) >= 4)
			return 128*1024;
	}

	return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
}

/* Required stride alignment in bytes for @color_plane of @fb. */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (INTEL_GEN(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}

/*
 * Can this plane's fb be remapped through the GGTT to work around
 * stride limits? Cursors, pre-gen4 platforms, CCS modifiers and
 * non-page-aligned linear strides cannot be remapped.
 */
bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int i;

	/* We don't want to deal with remapping with cursors */
	if (plane->id == PLANE_CURSOR)
		return false;

	/*
	 * The display engine limits already match/exceed the
	 * render engine limits, so not much point in remapping.
	 * Would also need to deal with the fence POT alignment
	 * and gen2 2KiB GTT tile size.
	 */
	if (INTEL_GEN(dev_priv) < 4)
		return false;

	/*
	 * The new CCS hash mode isn't compatible with remapping as
	 * the virtual address of the pages affects the compressed data.
	 */
	if (is_ccs_modifier(fb->modifier))
		return false;

	/* Linear needs a page aligned stride for remapping */
	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
		unsigned int alignment = intel_tile_size(dev_priv) - 1;

		for (i = 0; i < fb->format->num_planes; i++) {
			if (fb->pitches[i] & alignment)
				return false;
		}
	}

	return true;
}

/* Does this plane actually need GGTT remapping to satisfy its stride limit? */
static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 stride, max_stride;

	/*
	 * No remapping for invisible planes since we don't have
	 * an actual source viewport to remap.
	 */
	if (!plane_state->uapi.visible)
		return false;

	if (!intel_plane_can_remap(plane_state))
		return false;

	/*
	 * FIXME: aux plane limits on gen9+ are
	 * unclear in Bspec, for now no checking.
	 */
	stride = intel_fb_pitch(fb, 0, rotation);
	max_stride = plane->max_stride(plane, fb->format->format,
				       fb->modifier, rotation);

	return stride > max_stride;
}

/*
 * Return the horizontal/vertical subsampling factors of @color_plane of
 * @fb, relative to the main (first) plane.
 */
static void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
			       const struct drm_framebuffer *fb,
			       int color_plane)
{
	int main_plane;

	/* Plane 0 is by definition unsubsampled. */
	if (color_plane == 0) {
		*hsub = 1;
		*vsub = 1;

		return;
	}

	/*
	 * TODO: Deduct the subsampling from the char block for all CCS
	 * formats and planes.
	 */
	if (!is_gen12_ccs_plane(fb, color_plane)) {
		*hsub = fb->format->hsub;
		*vsub = fb->format->vsub;

		return;
	}

	/* gen12 CCS AUX plane: derive hsub from the char block widths. */
	main_plane = ccs_to_main_plane(fb, color_plane);
	*hsub = drm_format_info_block_width(fb->format, color_plane) /
		drm_format_info_block_width(fb->format, main_plane);

	/*
	 * The min stride check in the core framebuffer_check() function
	 * assumes that format->hsub applies to every plane except for the
	 * first plane. That's incorrect for the CCS AUX plane of the first
	 * plane, but for the above check to pass we must define the block
	 * width with that subsampling applied to it. Adjust the width here
	 * accordingly, so we can calculate the actual subsampling factor.
	 */
	if (main_plane == 0)
		*hsub *= fb->format->hsub;

	/* Fixed vertical subsampling for gen12 CCS AUX planes. */
	*vsub = 32;
}

/*
 * Validate that the intra-tile x/y offsets of a CCS AUX plane match
 * those of its main plane; the hw has no separate AUX x/y registers.
 * Returns 0 on success, -EINVAL on mismatch.
 */
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
	struct drm_i915_private *i915 = to_i915(fb->dev);
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	int main_plane;
	int hsub, vsub;
	int tile_width, tile_height;
	int ccs_x, ccs_y;
	int main_x, main_y;

	/* Nothing to check for non-CCS planes or the clear color plane. */
	if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
		return 0;

	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

	/* Scale the AUX tile dimensions to main surface units. */
	tile_width *= hsub;
	tile_height *= vsub;

	ccs_x = (x * hsub) % tile_width;
	ccs_y = (y * vsub) % tile_height;

	main_plane = ccs_to_main_plane(fb, ccs_plane);
	main_x = intel_fb->normal[main_plane].x % tile_width;
	main_y = intel_fb->normal[main_plane].y % tile_height;

	/*
	 * CCS doesn't have its own x/y offset register, so the intra CCS tile
	 * x/y offsets must match between CCS and the main surface.
	 */
	if (main_x != ccs_x || main_y != ccs_y) {
		drm_dbg_kms(&i915->drm,
			    "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
			    main_x, main_y,
			    ccs_x, ccs_y,
			    intel_fb->normal[main_plane].x,
			    intel_fb->normal[main_plane].y,
			    x, y);
		return -EINVAL;
	}

	return 0;
}

/* Dimensions in pixels of @color_plane of @fb, with subsampling applied. */
static void
intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
{
	int main_plane = is_ccs_plane(fb, color_plane) ?
			 ccs_to_main_plane(fb, color_plane) : 0;
	int main_hsub, main_vsub;
	int hsub, vsub;

	intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
	*w = fb->width / main_hsub / hsub;
	*h = fb->height / main_vsub / vsub;
}

/*
 * Setup the rotated view for an FB plane and return the size the GTT mapping
 * requires for this view.
 */
static u32
setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
		  u32 gtt_offset_rotated, int x, int y,
		  unsigned int width, unsigned int height,
		  unsigned int tile_size,
		  unsigned int tile_width, unsigned int tile_height,
		  struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	unsigned int pitch_tiles;
	struct drm_rect r;

	/* Y or Yf modifiers required for 90/270 rotation */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
		return 0;

	if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
		return 0;

	rot_info->plane[plane] = *plane_info;

	/* Rotated pitch is the plane height expressed in bytes per row. */
	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;

	/* rotate the x/y offsets to match the GTT view */
	drm_rect_init(&r, x, y, width, height);
	drm_rect_rotate(&r,
			plane_info->width * tile_width,
			plane_info->height * tile_height,
			DRM_MODE_ROTATE_270);
	x = r.x1;
	y = r.y1;

	/* rotate the tile dimensions to match the GTT view */
	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
	swap(tile_width, tile_height);

	/*
	 * We only keep the x/y offsets, so push all of the
	 * gtt offset into the x/y offsets.
	 */
	intel_adjust_tile_offset(&x, &y,
				 tile_width, tile_height,
				 tile_size, pitch_tiles,
				 gtt_offset_rotated * tile_size, 0);

	/*
	 * First pixel of the framebuffer from
	 * the start of the rotated gtt mapping.
	 */
	intel_fb->rotated[plane].x = x;
	intel_fb->rotated[plane].y = y;

	/* Size of this plane's rotated mapping, in tiles. */
	return plane_info->width * plane_info->height;
}

/*
 * Fill out the normal (and, for Y/Yf tiling, rotated) view geometry for
 * every color plane of @fb, and validate the resulting layout against
 * the size of the backing GEM object.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		/*
		 * Plane 2 of Render Compression with Clear Color fb modifier
		 * is consumed by the driver and not passed to DE. Skip the
		 * arithmetic related to alignment and offset calculation.
		 */
		if (is_gen12_ccs_cc_plane(fb, i)) {
			if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
				continue;
			else
				return -EINVAL;
		}

		cpp = fb->format->cpp[i];
		intel_fb_plane_dims(&width, &height, fb, i);

		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				    i, fb->offsets[i]);
			return ret;
		}

		ret = intel_fb_check_ccs_xy(fb, i, x, y);
		if (ret)
			return ret;

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3).
		 * So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				    i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		/* Work in whole tiles from here on. */
		offset /= tile_size;

		if (!is_surface_linear(fb, i)) {
			struct intel_remapped_plane_info plane_info;
			unsigned int tile_width, tile_height;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			plane_info.offset = offset;
			plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
							 tile_width * cpp);
			plane_info.width = DIV_ROUND_UP(x + width, tile_width);
			plane_info.height = DIV_ROUND_UP(y + height,
							 tile_height);

			/* how many tiles does this plane need */
			size = plane_info.stride * plane_info.height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			gtt_offset_rotated +=
				setup_fb_rotation(i, &plane_info,
						  gtt_offset_rotated,
						  x, y, width, height,
						  tile_size,
						  tile_width, tile_height,
						  fb);
		} else {
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		drm_dbg_kms(&dev_priv->drm,
			    "fb too big for bo (need %llu bytes, have %zu bytes)\n",
			    mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}

/*
 * Build a remapped (or rotated) GGTT view for the plane and rewrite the
 * per-color-plane stride/x/y in @plane_state to match that view, so the
 * effective stride fits the hw limit.
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* src coordinates are 16.16 fixed point; convert to pixels. */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* CCS fbs must never be remapped (checked by intel_plane_can_remap()). */
	drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}

/*
 * Compute the GGTT view and per-color-plane stride/offset/x/y for the
 * plane, remapping through the GGTT when the fb stride exceeds the hw
 * limit. Returns 0 or a negative error code from the stride check.
 */
int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
	const struct intel_framebuffer *fb =
		to_intel_framebuffer(plane_state->hw.fb);
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes;

	if (!fb)
		return 0;

	num_planes = fb->base.format->num_planes;

	if (intel_plane_needs_remap(plane_state)) {
		intel_plane_remap_gtt(plane_state);

		/*
		 * Sometimes even remapping can't overcome
		 * the stride limitations :( Can happen with
		 * big plane sizes and suitably misaligned
		 * offsets.
		 */
		return intel_plane_check_stride(plane_state);
	}

	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

	for (i = 0; i < num_planes; i++) {
		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
		plane_state->color_plane[i].offset = 0;

		if (drm_rotation_90_or_270(rotation)) {
			plane_state->color_plane[i].x = fb->rotated[i].x;
			plane_state->color_plane[i].y = fb->rotated[i].y;
		} else {
			plane_state->color_plane[i].x = fb->normal[i].x;
			plane_state->color_plane[i].y = fb->normal[i].y;
		}
	}

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				fb->base.width << 16, fb->base.height << 16,
				DRM_MODE_ROTATE_270);

	return intel_plane_check_stride(plane_state);
}

/*
 * Translate a DISPPLANE_* plane control format value to its DRM fourcc;
 * unrecognized values map to XRGB8888 via the default case.
 */
static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return
DRM_FORMAT_C8;
	case DISPPLANE_BGRA555:
		return DRM_FORMAT_ARGB1555;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRA888:
		return DRM_FORMAT_ARGB8888;
	case DISPPLANE_RGBA888:
		return DRM_FORMAT_ABGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	case DISPPLANE_BGRA101010:
		return DRM_FORMAT_ARGB2101010;
	case DISPPLANE_RGBA101010:
		return DRM_FORMAT_ABGR2101010;
	case DISPPLANE_RGBX161616:
		return DRM_FORMAT_XBGR16161616F;
	}
}

/*
 * Translate a PLANE_CTL_FORMAT_* value to its DRM fourcc.
 * @rgb_order and @alpha select between the RGB/BGR and X/A variants of
 * the 8888, 2101010 and 16161616F families; unrecognized values map to
 * the XRGB_8888 family via the default case.
 */
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	case PLANE_CTL_FORMAT_NV12:
		return DRM_FORMAT_NV12;
	case PLANE_CTL_FORMAT_XYUV:
		return DRM_FORMAT_XYUV8888;
	case PLANE_CTL_FORMAT_P010:
		return DRM_FORMAT_P010;
	case PLANE_CTL_FORMAT_P012:
		return DRM_FORMAT_P012;
	case PLANE_CTL_FORMAT_P016:
		return DRM_FORMAT_P016;
	case PLANE_CTL_FORMAT_Y210:
		return DRM_FORMAT_Y210;
	case PLANE_CTL_FORMAT_Y212:
		return DRM_FORMAT_Y212;
	case PLANE_CTL_FORMAT_Y216:
		return DRM_FORMAT_Y216;
	case PLANE_CTL_FORMAT_Y410:
		return DRM_FORMAT_XVYU2101010;
	case PLANE_CTL_FORMAT_Y412:
		return DRM_FORMAT_XVYU12_16161616;
	case PLANE_CTL_FORMAT_Y416:
		return DRM_FORMAT_XVYU16161616;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case
PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR2101010;
			else
				return DRM_FORMAT_XBGR2101010;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB2101010;
			else
				return DRM_FORMAT_XRGB2101010;
		}
	case PLANE_CTL_FORMAT_XRGB_16161616F:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR16161616F;
			else
				return DRM_FORMAT_XBGR16161616F;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB16161616F;
			else
				return DRM_FORMAT_XRGB16161616F;
		}
	}
}

/*
 * Wrap the stolen-memory region the firmware programmed for the boot
 * framebuffer in a GEM object and pin it into the GGTT at its current
 * address. Returns the vma, or NULL on any failure.
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	/* Expand [base, base+size) to GGTT minimum alignment boundaries. */
	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	if (size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	/*
	 * Mark it WT ahead of time to avoid changing the
	 * cache_level during fbdev initialization. The
	 * unbind there would get stuck waiting for rcu.
	 */
	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
					    I915_CACHE_WT : I915_CACHE_NONE);

	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	/* Pin at the exact GGTT address the firmware fb already occupies. */
	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}

/*
 * Build an intel framebuffer around the firmware-programmed plane
 * memory. Returns false if the modifier is unsupported or the vma/fb
 * setup fails.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	struct i915_vma *vma;

	/* Only linear, X and Y tiling are handled for the initial fb. */
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		drm_dbg(&dev_priv->drm,
			"Unsupported modifier for initial FB: 0x%llx\n",
			fb->modifier);
		return false;
	}

	vma = initial_plane_vma(dev_priv, plane_config);
	if (!vma)
		return false;

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb),
				   vma->obj, &mode_cmd)) {
		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
		goto err_vma;
	}

	plane_config->vma = vma;
	return true;

err_vma:
	i915_vma_put(vma);
	return false;
}

/* Update plane visibility and the crtc's uapi plane_mask to match. */
static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			struct intel_plane_state *plane_state,
			bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

/* Rebuild enabled_planes/active_planes from the uapi plane_mask. */
static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

/*
 * Disable @plane on @crtc outside of a full atomic commit, e.g. during
 * initial plane readout when the firmware fb can't be reconstructed.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_plane_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}

/*
 * Find (or share with another crtc) the object backing the firmware
 * framebuffer and hook it up to the crtc's primary plane state; if the
 * fb can't be reconstructed, disable the primary plane instead.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(intel_crtc->base.state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc_state(c->state)->uapi.active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT address means it's the same firmware fb. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			vma = state->vma;
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);
	if (crtc_state->bigjoiner) {
		struct intel_crtc *slave =
			crtc_state->bigjoiner_linked_crtc;
		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
	}

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	__i915_vma_pin(vma);
	intel_state->vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
		if (vma->fence)
			intel_state->flags |= PLANE_HAS_FENCE;

	/* Full-fb src/dst rects; src is in 16.16 fixed point. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	drm_framebuffer_get(fb);

	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
					  intel_crtc);

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}


/*
 * Walk the CCS AUX surface offset backwards (by surface alignment
 * steps) until its intra-tile x/y matches the main surface's. On
 * success the matched offset/x/y are written back to the AUX
 * color_plane and true is returned; false if no match was found.
 */
static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
			       int main_x, int main_y, u32 main_offset,
			       int ccs_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset -
							       alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}

/*
 * Y offset (in lines) of the plane's first pixel from the start of the
 * fence-aligned surface.
 */
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x,
					  &y, plane_state, 0,
					  plane_state->color_plane[0].offset, 0);

	return y;
}

/* Plane minimum width in pixels; 1 if the plane declares no hook. */
static int intel_plane_min_width(struct intel_plane *plane,
				 const struct drm_framebuffer *fb,
				 int color_plane,
				 unsigned int rotation)
{
	if (plane->min_width)
		return plane->min_width(fb, color_plane, rotation);
	else
		return 1;
}

/* Plane maximum width in pixels; unlimited if the plane declares no hook. */
static int intel_plane_max_width(struct intel_plane *plane,
				 const struct drm_framebuffer *fb,
				 int color_plane,
				 unsigned int rotation)
{
	if (plane->max_width)
		return plane->max_width(fb, color_plane, rotation);
	else
		return INT_MAX;
}

/* Plane maximum height in pixels; unlimited if the plane declares no hook. */
static int intel_plane_max_height(struct intel_plane *plane,
				  const struct drm_framebuffer *fb,
				  int color_plane,
				  unsigned int rotation)
{
	if (plane->max_height)
		return plane->max_height(fb, color_plane, rotation);
	else
		return INT_MAX;
}

/*
 * Compute the aligned surface offset (and adjusted x/y) for the main
 * surface, keeping it at or below the AUX plane offset and, for
 * X-tiling, within the stride constraint. Returns 0 or -EINVAL.
 */
int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
				 int *x, int *y, u32 *offset)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	const int aux_plane = intel_main_to_aux_plane(fb, 0);
	const u32 aux_offset = plane_state->color_plane[aux_plane].offset;
	const u32 alignment = intel_surf_alignment(fb, 0);
	const int w = drm_rect_width(&plane_state->uapi.src) >> 16;

	intel_add_fb_offsets(x, y, plane_state, 0);
	*offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
	/* The alignment mask math below requires a power-of-2 alignment. */
	if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
		return -EINVAL;

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (aux_plane && *offset > aux_offset)
		*offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
							    *offset,
							    aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Step the offset back until x + width fits the stride. */
		while ((*x + w) * cpp > plane_state->color_plane[0].stride) {
			if (*offset == 0) {
				drm_dbg_kms(&dev_priv->drm,
					    "Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			*offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
								    *offset,
								    *offset - alignment);
		}
	}

	return 0;
}

/*
 * Validate the main surface: source size limits, aligned offset and
 * (for CCS) matching main/AUX intra-tile coordinates. Writes the result
 * into color_plane[0] and the uapi src rect.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	const unsigned int rotation = plane_state->hw.rotation;
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	const int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	const int min_width = intel_plane_min_width(plane, fb, 0, rotation);
	const int max_width = intel_plane_max_width(plane, fb, 0, rotation);
	const int max_height = intel_plane_max_height(plane, fb, 0, rotation);
	const int aux_plane = intel_main_to_aux_plane(fb, 0);
	const u32 alignment = intel_surf_alignment(fb, 0);
	u32 offset;
	int ret;

	if (w > max_width || w < min_width || h > max_height) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
			    w, h, min_width, max_width,
max_height); 3065 return -EINVAL; 3066 } 3067 3068 ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset); 3069 if (ret) 3070 return ret; 3071 3072 /* 3073 * CCS AUX surface doesn't have its own x/y offsets, we must make sure 3074 * they match with the main surface x/y offsets. 3075 */ 3076 if (is_ccs_modifier(fb->modifier)) { 3077 while (!skl_check_main_ccs_coordinates(plane_state, x, y, 3078 offset, aux_plane)) { 3079 if (offset == 0) 3080 break; 3081 3082 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3083 offset, offset - alignment); 3084 } 3085 3086 if (x != plane_state->color_plane[aux_plane].x || 3087 y != plane_state->color_plane[aux_plane].y) { 3088 drm_dbg_kms(&dev_priv->drm, 3089 "Unable to find suitable display surface offset due to CCS\n"); 3090 return -EINVAL; 3091 } 3092 } 3093 3094 drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191); 3095 3096 plane_state->color_plane[0].offset = offset; 3097 plane_state->color_plane[0].x = x; 3098 plane_state->color_plane[0].y = y; 3099 3100 /* 3101 * Put the final coordinates back so that the src 3102 * coordinate checks will see the right values. 
3103 */ 3104 drm_rect_translate_to(&plane_state->uapi.src, 3105 x << 16, y << 16); 3106 3107 return 0; 3108 } 3109 3110 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) 3111 { 3112 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 3113 struct drm_i915_private *i915 = to_i915(plane->base.dev); 3114 const struct drm_framebuffer *fb = plane_state->hw.fb; 3115 unsigned int rotation = plane_state->hw.rotation; 3116 int uv_plane = 1; 3117 int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation); 3118 int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation); 3119 int x = plane_state->uapi.src.x1 >> 17; 3120 int y = plane_state->uapi.src.y1 >> 17; 3121 int w = drm_rect_width(&plane_state->uapi.src) >> 17; 3122 int h = drm_rect_height(&plane_state->uapi.src) >> 17; 3123 u32 offset; 3124 3125 /* FIXME not quite sure how/if these apply to the chroma plane */ 3126 if (w > max_width || h > max_height) { 3127 drm_dbg_kms(&i915->drm, 3128 "CbCr source size %dx%d too big (limit %dx%d)\n", 3129 w, h, max_width, max_height); 3130 return -EINVAL; 3131 } 3132 3133 intel_add_fb_offsets(&x, &y, plane_state, uv_plane); 3134 offset = intel_plane_compute_aligned_offset(&x, &y, 3135 plane_state, uv_plane); 3136 3137 if (is_ccs_modifier(fb->modifier)) { 3138 int ccs_plane = main_to_ccs_plane(fb, uv_plane); 3139 u32 aux_offset = plane_state->color_plane[ccs_plane].offset; 3140 u32 alignment = intel_surf_alignment(fb, uv_plane); 3141 3142 if (offset > aux_offset) 3143 offset = intel_plane_adjust_aligned_offset(&x, &y, 3144 plane_state, 3145 uv_plane, 3146 offset, 3147 aux_offset & ~(alignment - 1)); 3148 3149 while (!skl_check_main_ccs_coordinates(plane_state, x, y, 3150 offset, ccs_plane)) { 3151 if (offset == 0) 3152 break; 3153 3154 offset = intel_plane_adjust_aligned_offset(&x, &y, 3155 plane_state, 3156 uv_plane, 3157 offset, offset - alignment); 3158 } 3159 3160 if (x != plane_state->color_plane[ccs_plane].x || 3161 
y != plane_state->color_plane[ccs_plane].y) { 3162 drm_dbg_kms(&i915->drm, 3163 "Unable to find suitable display surface offset due to CCS\n"); 3164 return -EINVAL; 3165 } 3166 } 3167 3168 drm_WARN_ON(&i915->drm, x > 8191 || y > 8191); 3169 3170 plane_state->color_plane[uv_plane].offset = offset; 3171 plane_state->color_plane[uv_plane].x = x; 3172 plane_state->color_plane[uv_plane].y = y; 3173 3174 return 0; 3175 } 3176 3177 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) 3178 { 3179 const struct drm_framebuffer *fb = plane_state->hw.fb; 3180 int src_x = plane_state->uapi.src.x1 >> 16; 3181 int src_y = plane_state->uapi.src.y1 >> 16; 3182 u32 offset; 3183 int ccs_plane; 3184 3185 for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) { 3186 int main_hsub, main_vsub; 3187 int hsub, vsub; 3188 int x, y; 3189 3190 if (!is_ccs_plane(fb, ccs_plane) || 3191 is_gen12_ccs_cc_plane(fb, ccs_plane)) 3192 continue; 3193 3194 intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, 3195 ccs_to_main_plane(fb, ccs_plane)); 3196 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); 3197 3198 hsub *= main_hsub; 3199 vsub *= main_vsub; 3200 x = src_x / hsub; 3201 y = src_y / vsub; 3202 3203 intel_add_fb_offsets(&x, &y, plane_state, ccs_plane); 3204 3205 offset = intel_plane_compute_aligned_offset(&x, &y, 3206 plane_state, 3207 ccs_plane); 3208 3209 plane_state->color_plane[ccs_plane].offset = offset; 3210 plane_state->color_plane[ccs_plane].x = (x * hsub + 3211 src_x % hsub) / 3212 main_hsub; 3213 plane_state->color_plane[ccs_plane].y = (y * vsub + 3214 src_y % vsub) / 3215 main_vsub; 3216 } 3217 3218 return 0; 3219 } 3220 3221 int skl_check_plane_surface(struct intel_plane_state *plane_state) 3222 { 3223 const struct drm_framebuffer *fb = plane_state->hw.fb; 3224 int ret, i; 3225 3226 ret = intel_plane_compute_gtt(plane_state); 3227 if (ret) 3228 return ret; 3229 3230 if (!plane_state->uapi.visible) 3231 return 0; 3232 3233 /* 3234 
* Handle the AUX surface first since the main surface setup depends on 3235 * it. 3236 */ 3237 if (is_ccs_modifier(fb->modifier)) { 3238 ret = skl_check_ccs_aux_surface(plane_state); 3239 if (ret) 3240 return ret; 3241 } 3242 3243 if (intel_format_info_is_yuv_semiplanar(fb->format, 3244 fb->modifier)) { 3245 ret = skl_check_nv12_aux_surface(plane_state); 3246 if (ret) 3247 return ret; 3248 } 3249 3250 for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) { 3251 plane_state->color_plane[i].offset = 0; 3252 plane_state->color_plane[i].x = 0; 3253 plane_state->color_plane[i].y = 0; 3254 } 3255 3256 ret = skl_check_main_surface(plane_state); 3257 if (ret) 3258 return ret; 3259 3260 return 0; 3261 } 3262 3263 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 3264 { 3265 struct drm_device *dev = intel_crtc->base.dev; 3266 struct drm_i915_private *dev_priv = to_i915(dev); 3267 unsigned long irqflags; 3268 3269 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3270 3271 intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0); 3272 intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 3273 intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); 3274 3275 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3276 } 3277 3278 /* 3279 * This function detaches (aka. 
unbinds) unused scalers in hardware 3280 */ 3281 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state) 3282 { 3283 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 3284 const struct intel_crtc_scaler_state *scaler_state = 3285 &crtc_state->scaler_state; 3286 int i; 3287 3288 /* loop through and disable scalers that aren't in use */ 3289 for (i = 0; i < intel_crtc->num_scalers; i++) { 3290 if (!scaler_state->scalers[i].in_use) 3291 skl_detach_scaler(intel_crtc, i); 3292 } 3293 } 3294 3295 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, 3296 int color_plane, unsigned int rotation) 3297 { 3298 /* 3299 * The stride is either expressed as a multiple of 64 bytes chunks for 3300 * linear buffers or in number of tiles for tiled buffers. 3301 */ 3302 if (is_surface_linear(fb, color_plane)) 3303 return 64; 3304 else if (drm_rotation_90_or_270(rotation)) 3305 return intel_tile_height(fb, color_plane); 3306 else 3307 return intel_tile_width_bytes(fb, color_plane); 3308 } 3309 3310 u32 skl_plane_stride(const struct intel_plane_state *plane_state, 3311 int color_plane) 3312 { 3313 const struct drm_framebuffer *fb = plane_state->hw.fb; 3314 unsigned int rotation = plane_state->hw.rotation; 3315 u32 stride = plane_state->color_plane[color_plane].stride; 3316 3317 if (color_plane >= fb->format->num_planes) 3318 return 0; 3319 3320 return stride / skl_plane_stride_mult(fb, color_plane, rotation); 3321 } 3322 3323 static u32 skl_plane_ctl_format(u32 pixel_format) 3324 { 3325 switch (pixel_format) { 3326 case DRM_FORMAT_C8: 3327 return PLANE_CTL_FORMAT_INDEXED; 3328 case DRM_FORMAT_RGB565: 3329 return PLANE_CTL_FORMAT_RGB_565; 3330 case DRM_FORMAT_XBGR8888: 3331 case DRM_FORMAT_ABGR8888: 3332 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; 3333 case DRM_FORMAT_XRGB8888: 3334 case DRM_FORMAT_ARGB8888: 3335 return PLANE_CTL_FORMAT_XRGB_8888; 3336 case DRM_FORMAT_XBGR2101010: 3337 case DRM_FORMAT_ABGR2101010: 
3338 return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX; 3339 case DRM_FORMAT_XRGB2101010: 3340 case DRM_FORMAT_ARGB2101010: 3341 return PLANE_CTL_FORMAT_XRGB_2101010; 3342 case DRM_FORMAT_XBGR16161616F: 3343 case DRM_FORMAT_ABGR16161616F: 3344 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; 3345 case DRM_FORMAT_XRGB16161616F: 3346 case DRM_FORMAT_ARGB16161616F: 3347 return PLANE_CTL_FORMAT_XRGB_16161616F; 3348 case DRM_FORMAT_XYUV8888: 3349 return PLANE_CTL_FORMAT_XYUV; 3350 case DRM_FORMAT_YUYV: 3351 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; 3352 case DRM_FORMAT_YVYU: 3353 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; 3354 case DRM_FORMAT_UYVY: 3355 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; 3356 case DRM_FORMAT_VYUY: 3357 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; 3358 case DRM_FORMAT_NV12: 3359 return PLANE_CTL_FORMAT_NV12; 3360 case DRM_FORMAT_P010: 3361 return PLANE_CTL_FORMAT_P010; 3362 case DRM_FORMAT_P012: 3363 return PLANE_CTL_FORMAT_P012; 3364 case DRM_FORMAT_P016: 3365 return PLANE_CTL_FORMAT_P016; 3366 case DRM_FORMAT_Y210: 3367 return PLANE_CTL_FORMAT_Y210; 3368 case DRM_FORMAT_Y212: 3369 return PLANE_CTL_FORMAT_Y212; 3370 case DRM_FORMAT_Y216: 3371 return PLANE_CTL_FORMAT_Y216; 3372 case DRM_FORMAT_XVYU2101010: 3373 return PLANE_CTL_FORMAT_Y410; 3374 case DRM_FORMAT_XVYU12_16161616: 3375 return PLANE_CTL_FORMAT_Y412; 3376 case DRM_FORMAT_XVYU16161616: 3377 return PLANE_CTL_FORMAT_Y416; 3378 default: 3379 MISSING_CASE(pixel_format); 3380 } 3381 3382 return 0; 3383 } 3384 3385 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) 3386 { 3387 if (!plane_state->hw.fb->format->has_alpha) 3388 return PLANE_CTL_ALPHA_DISABLE; 3389 3390 switch (plane_state->hw.pixel_blend_mode) { 3391 case DRM_MODE_BLEND_PIXEL_NONE: 3392 return PLANE_CTL_ALPHA_DISABLE; 3393 case DRM_MODE_BLEND_PREMULTI: 3394 return PLANE_CTL_ALPHA_SW_PREMULTIPLY; 3395 case DRM_MODE_BLEND_COVERAGE: 
3396 return PLANE_CTL_ALPHA_HW_PREMULTIPLY; 3397 default: 3398 MISSING_CASE(plane_state->hw.pixel_blend_mode); 3399 return PLANE_CTL_ALPHA_DISABLE; 3400 } 3401 } 3402 3403 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state) 3404 { 3405 if (!plane_state->hw.fb->format->has_alpha) 3406 return PLANE_COLOR_ALPHA_DISABLE; 3407 3408 switch (plane_state->hw.pixel_blend_mode) { 3409 case DRM_MODE_BLEND_PIXEL_NONE: 3410 return PLANE_COLOR_ALPHA_DISABLE; 3411 case DRM_MODE_BLEND_PREMULTI: 3412 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY; 3413 case DRM_MODE_BLEND_COVERAGE: 3414 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY; 3415 default: 3416 MISSING_CASE(plane_state->hw.pixel_blend_mode); 3417 return PLANE_COLOR_ALPHA_DISABLE; 3418 } 3419 } 3420 3421 static u32 skl_plane_ctl_tiling(u64 fb_modifier) 3422 { 3423 switch (fb_modifier) { 3424 case DRM_FORMAT_MOD_LINEAR: 3425 break; 3426 case I915_FORMAT_MOD_X_TILED: 3427 return PLANE_CTL_TILED_X; 3428 case I915_FORMAT_MOD_Y_TILED: 3429 return PLANE_CTL_TILED_Y; 3430 case I915_FORMAT_MOD_Y_TILED_CCS: 3431 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: 3432 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; 3433 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 3434 return PLANE_CTL_TILED_Y | 3435 PLANE_CTL_RENDER_DECOMPRESSION_ENABLE | 3436 PLANE_CTL_CLEAR_COLOR_DISABLE; 3437 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 3438 return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE; 3439 case I915_FORMAT_MOD_Yf_TILED: 3440 return PLANE_CTL_TILED_YF; 3441 case I915_FORMAT_MOD_Yf_TILED_CCS: 3442 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; 3443 default: 3444 MISSING_CASE(fb_modifier); 3445 } 3446 3447 return 0; 3448 } 3449 3450 static u32 skl_plane_ctl_rotate(unsigned int rotate) 3451 { 3452 switch (rotate) { 3453 case DRM_MODE_ROTATE_0: 3454 break; 3455 /* 3456 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr 3457 * while i915 HW rotation is clockwise, thats 
why this swapping. 3458 */ 3459 case DRM_MODE_ROTATE_90: 3460 return PLANE_CTL_ROTATE_270; 3461 case DRM_MODE_ROTATE_180: 3462 return PLANE_CTL_ROTATE_180; 3463 case DRM_MODE_ROTATE_270: 3464 return PLANE_CTL_ROTATE_90; 3465 default: 3466 MISSING_CASE(rotate); 3467 } 3468 3469 return 0; 3470 } 3471 3472 static u32 cnl_plane_ctl_flip(unsigned int reflect) 3473 { 3474 switch (reflect) { 3475 case 0: 3476 break; 3477 case DRM_MODE_REFLECT_X: 3478 return PLANE_CTL_FLIP_HORIZONTAL; 3479 case DRM_MODE_REFLECT_Y: 3480 default: 3481 MISSING_CASE(reflect); 3482 } 3483 3484 return 0; 3485 } 3486 3487 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 3488 { 3489 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 3490 u32 plane_ctl = 0; 3491 3492 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 3493 return plane_ctl; 3494 3495 if (crtc_state->gamma_enable) 3496 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE; 3497 3498 if (crtc_state->csc_enable) 3499 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; 3500 3501 return plane_ctl; 3502 } 3503 3504 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, 3505 const struct intel_plane_state *plane_state) 3506 { 3507 struct drm_i915_private *dev_priv = 3508 to_i915(plane_state->uapi.plane->dev); 3509 const struct drm_framebuffer *fb = plane_state->hw.fb; 3510 unsigned int rotation = plane_state->hw.rotation; 3511 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 3512 u32 plane_ctl; 3513 3514 plane_ctl = PLANE_CTL_ENABLE; 3515 3516 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { 3517 plane_ctl |= skl_plane_ctl_alpha(plane_state); 3518 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; 3519 3520 if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) 3521 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709; 3522 3523 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 3524 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE; 3525 } 3526 3527 plane_ctl |= 
skl_plane_ctl_format(fb->format->format); 3528 plane_ctl |= skl_plane_ctl_tiling(fb->modifier); 3529 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK); 3530 3531 if (INTEL_GEN(dev_priv) >= 10) 3532 plane_ctl |= cnl_plane_ctl_flip(rotation & 3533 DRM_MODE_REFLECT_MASK); 3534 3535 if (key->flags & I915_SET_COLORKEY_DESTINATION) 3536 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; 3537 else if (key->flags & I915_SET_COLORKEY_SOURCE) 3538 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; 3539 3540 return plane_ctl; 3541 } 3542 3543 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) 3544 { 3545 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 3546 u32 plane_color_ctl = 0; 3547 3548 if (INTEL_GEN(dev_priv) >= 11) 3549 return plane_color_ctl; 3550 3551 if (crtc_state->gamma_enable) 3552 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; 3553 3554 if (crtc_state->csc_enable) 3555 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; 3556 3557 return plane_color_ctl; 3558 } 3559 3560 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, 3561 const struct intel_plane_state *plane_state) 3562 { 3563 struct drm_i915_private *dev_priv = 3564 to_i915(plane_state->uapi.plane->dev); 3565 const struct drm_framebuffer *fb = plane_state->hw.fb; 3566 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 3567 u32 plane_color_ctl = 0; 3568 3569 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; 3570 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); 3571 3572 if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) { 3573 switch (plane_state->hw.color_encoding) { 3574 case DRM_COLOR_YCBCR_BT709: 3575 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; 3576 break; 3577 case DRM_COLOR_YCBCR_BT2020: 3578 plane_color_ctl |= 3579 PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020; 3580 break; 3581 default: 3582 plane_color_ctl |= 3583 PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601; 3584 } 3585 if 
(plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 3586 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; 3587 } else if (fb->format->is_yuv) { 3588 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; 3589 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 3590 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; 3591 } 3592 3593 return plane_color_ctl; 3594 } 3595 3596 static int 3597 __intel_display_resume(struct drm_device *dev, 3598 struct drm_atomic_state *state, 3599 struct drm_modeset_acquire_ctx *ctx) 3600 { 3601 struct drm_crtc_state *crtc_state; 3602 struct drm_crtc *crtc; 3603 int i, ret; 3604 3605 intel_modeset_setup_hw_state(dev, ctx); 3606 intel_vga_redisable(to_i915(dev)); 3607 3608 if (!state) 3609 return 0; 3610 3611 /* 3612 * We've duplicated the state, pointers to the old state are invalid. 3613 * 3614 * Don't attempt to use the old state until we commit the duplicated state. 3615 */ 3616 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 3617 /* 3618 * Force recalculation even if we restore 3619 * current state. With fast modeset this may not result 3620 * in a modeset when the state is compatible. 
3621 */ 3622 crtc_state->mode_changed = true; 3623 } 3624 3625 /* ignore any reset values/BIOS leftovers in the WM registers */ 3626 if (!HAS_GMCH(to_i915(dev))) 3627 to_intel_atomic_state(state)->skip_intermediate_wm = true; 3628 3629 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 3630 3631 drm_WARN_ON(dev, ret == -EDEADLK); 3632 return ret; 3633 } 3634 3635 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 3636 { 3637 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && 3638 intel_has_gpu_reset(&dev_priv->gt)); 3639 } 3640 3641 void intel_display_prepare_reset(struct drm_i915_private *dev_priv) 3642 { 3643 struct drm_device *dev = &dev_priv->drm; 3644 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 3645 struct drm_atomic_state *state; 3646 int ret; 3647 3648 if (!HAS_DISPLAY(dev_priv)) 3649 return; 3650 3651 /* reset doesn't touch the display */ 3652 if (!dev_priv->params.force_reset_modeset_test && 3653 !gpu_reset_clobbers_display(dev_priv)) 3654 return; 3655 3656 /* We have a modeset vs reset deadlock, defensively unbreak it. */ 3657 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 3658 smp_mb__after_atomic(); 3659 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET); 3660 3661 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { 3662 drm_dbg_kms(&dev_priv->drm, 3663 "Modeset potentially stuck, unbreaking through wedging\n"); 3664 intel_gt_set_wedged(&dev_priv->gt); 3665 } 3666 3667 /* 3668 * Need mode_config.mutex so that we don't 3669 * trample ongoing ->detect() and whatnot. 3670 */ 3671 mutex_lock(&dev->mode_config.mutex); 3672 drm_modeset_acquire_init(ctx, 0); 3673 while (1) { 3674 ret = drm_modeset_lock_all_ctx(dev, ctx); 3675 if (ret != -EDEADLK) 3676 break; 3677 3678 drm_modeset_backoff(ctx); 3679 } 3680 /* 3681 * Disabling the crtcs gracefully seems nicer. Also the 3682 * g33 docs say we should at least disable all the planes. 
3683 */ 3684 state = drm_atomic_helper_duplicate_state(dev, ctx); 3685 if (IS_ERR(state)) { 3686 ret = PTR_ERR(state); 3687 drm_err(&dev_priv->drm, "Duplicating state failed with %i\n", 3688 ret); 3689 return; 3690 } 3691 3692 ret = drm_atomic_helper_disable_all(dev, ctx); 3693 if (ret) { 3694 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n", 3695 ret); 3696 drm_atomic_state_put(state); 3697 return; 3698 } 3699 3700 dev_priv->modeset_restore_state = state; 3701 state->acquire_ctx = ctx; 3702 } 3703 3704 void intel_display_finish_reset(struct drm_i915_private *dev_priv) 3705 { 3706 struct drm_device *dev = &dev_priv->drm; 3707 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 3708 struct drm_atomic_state *state; 3709 int ret; 3710 3711 if (!HAS_DISPLAY(dev_priv)) 3712 return; 3713 3714 /* reset doesn't touch the display */ 3715 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) 3716 return; 3717 3718 state = fetch_and_zero(&dev_priv->modeset_restore_state); 3719 if (!state) 3720 goto unlock; 3721 3722 /* reset doesn't touch the display */ 3723 if (!gpu_reset_clobbers_display(dev_priv)) { 3724 /* for testing only restore the display */ 3725 ret = __intel_display_resume(dev, state, ctx); 3726 if (ret) 3727 drm_err(&dev_priv->drm, 3728 "Restoring old state failed with %i\n", ret); 3729 } else { 3730 /* 3731 * The display has been reset as well, 3732 * so need a full re-initialization. 
3733 */ 3734 intel_pps_unlock_regs_wa(dev_priv); 3735 intel_modeset_init_hw(dev_priv); 3736 intel_init_clock_gating(dev_priv); 3737 intel_hpd_init(dev_priv); 3738 3739 ret = __intel_display_resume(dev, state, ctx); 3740 if (ret) 3741 drm_err(&dev_priv->drm, 3742 "Restoring old state failed with %i\n", ret); 3743 3744 intel_hpd_poll_disable(dev_priv); 3745 } 3746 3747 drm_atomic_state_put(state); 3748 unlock: 3749 drm_modeset_drop_locks(ctx); 3750 drm_modeset_acquire_fini(ctx); 3751 mutex_unlock(&dev->mode_config.mutex); 3752 3753 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 3754 } 3755 3756 static void icl_set_pipe_chicken(struct intel_crtc *crtc) 3757 { 3758 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3759 enum pipe pipe = crtc->pipe; 3760 u32 tmp; 3761 3762 tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe)); 3763 3764 /* 3765 * Display WA #1153: icl 3766 * enable hardware to bypass the alpha math 3767 * and rounding for per-pixel values 00 and 0xff 3768 */ 3769 tmp |= PER_PIXEL_ALPHA_BYPASS_EN; 3770 /* 3771 * Display WA # 1605353570: icl 3772 * Set the pixel rounding bit to 1 for allowing 3773 * passthrough of Frame buffer pixels unmodified 3774 * across pipe 3775 */ 3776 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; 3777 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp); 3778 } 3779 3780 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) 3781 { 3782 struct drm_crtc *crtc; 3783 bool cleanup_done; 3784 3785 drm_for_each_crtc(crtc, &dev_priv->drm) { 3786 struct drm_crtc_commit *commit; 3787 spin_lock(&crtc->commit_lock); 3788 commit = list_first_entry_or_null(&crtc->commit_list, 3789 struct drm_crtc_commit, commit_entry); 3790 cleanup_done = commit ? 
3791 try_wait_for_completion(&commit->cleanup_done) : true; 3792 spin_unlock(&crtc->commit_lock); 3793 3794 if (cleanup_done) 3795 continue; 3796 3797 drm_crtc_wait_one_vblank(crtc); 3798 3799 return true; 3800 } 3801 3802 return false; 3803 } 3804 3805 void lpt_disable_iclkip(struct drm_i915_private *dev_priv) 3806 { 3807 u32 temp; 3808 3809 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE); 3810 3811 mutex_lock(&dev_priv->sb_lock); 3812 3813 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 3814 temp |= SBI_SSCCTL_DISABLE; 3815 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 3816 3817 mutex_unlock(&dev_priv->sb_lock); 3818 } 3819 3820 /* Program iCLKIP clock to the desired frequency */ 3821 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) 3822 { 3823 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3824 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3825 int clock = crtc_state->hw.adjusted_mode.crtc_clock; 3826 u32 divsel, phaseinc, auxdiv, phasedir = 0; 3827 u32 temp; 3828 3829 lpt_disable_iclkip(dev_priv); 3830 3831 /* The iCLK virtual clock root frequency is in MHz, 3832 * but the adjusted_mode->crtc_clock in in KHz. To get the 3833 * divisors, it is necessary to divide one by another, so we 3834 * convert the virtual clock precision to KHz here for higher 3835 * precision. 
3836 */ 3837 for (auxdiv = 0; auxdiv < 2; auxdiv++) { 3838 u32 iclk_virtual_root_freq = 172800 * 1000; 3839 u32 iclk_pi_range = 64; 3840 u32 desired_divisor; 3841 3842 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 3843 clock << auxdiv); 3844 divsel = (desired_divisor / iclk_pi_range) - 2; 3845 phaseinc = desired_divisor % iclk_pi_range; 3846 3847 /* 3848 * Near 20MHz is a corner case which is 3849 * out of range for the 7-bit divisor 3850 */ 3851 if (divsel <= 0x7f) 3852 break; 3853 } 3854 3855 /* This should not happen with any sane values */ 3856 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) & 3857 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); 3858 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) & 3859 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 3860 3861 drm_dbg_kms(&dev_priv->drm, 3862 "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 3863 clock, auxdiv, divsel, phasedir, phaseinc); 3864 3865 mutex_lock(&dev_priv->sb_lock); 3866 3867 /* Program SSCDIVINTPHASE6 */ 3868 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 3869 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 3870 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 3871 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 3872 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 3873 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 3874 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 3875 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); 3876 3877 /* Program SSCAUXDIV */ 3878 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 3879 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 3880 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 3881 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); 3882 3883 /* Enable modulator and associated divider */ 3884 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 3885 temp &= ~SBI_SSCCTL_DISABLE; 3886 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 3887 3888 mutex_unlock(&dev_priv->sb_lock); 3889 3890 
/* Wait for initialization time */ 3891 udelay(24); 3892 3893 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE); 3894 } 3895 3896 int lpt_get_iclkip(struct drm_i915_private *dev_priv) 3897 { 3898 u32 divsel, phaseinc, auxdiv; 3899 u32 iclk_virtual_root_freq = 172800 * 1000; 3900 u32 iclk_pi_range = 64; 3901 u32 desired_divisor; 3902 u32 temp; 3903 3904 if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) 3905 return 0; 3906 3907 mutex_lock(&dev_priv->sb_lock); 3908 3909 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 3910 if (temp & SBI_SSCCTL_DISABLE) { 3911 mutex_unlock(&dev_priv->sb_lock); 3912 return 0; 3913 } 3914 3915 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 3916 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> 3917 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; 3918 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> 3919 SBI_SSCDIVINTPHASE_INCVAL_SHIFT; 3920 3921 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 3922 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> 3923 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; 3924 3925 mutex_unlock(&dev_priv->sb_lock); 3926 3927 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; 3928 3929 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 3930 desired_divisor << auxdiv); 3931 } 3932 3933 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, 3934 enum pipe pch_transcoder) 3935 { 3936 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3937 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3938 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3939 3940 intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), 3941 intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); 3942 intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), 3943 intel_de_read(dev_priv, HBLANK(cpu_transcoder))); 3944 intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), 3945 intel_de_read(dev_priv, HSYNC(cpu_transcoder))); 3946 
3947 intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), 3948 intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); 3949 intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), 3950 intel_de_read(dev_priv, VBLANK(cpu_transcoder))); 3951 intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), 3952 intel_de_read(dev_priv, VSYNC(cpu_transcoder))); 3953 intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), 3954 intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); 3955 } 3956 3957 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) 3958 { 3959 u32 temp; 3960 3961 temp = intel_de_read(dev_priv, SOUTH_CHICKEN1); 3962 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) 3963 return; 3964 3965 drm_WARN_ON(&dev_priv->drm, 3966 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) & 3967 FDI_RX_ENABLE); 3968 drm_WARN_ON(&dev_priv->drm, 3969 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) & 3970 FDI_RX_ENABLE); 3971 3972 temp &= ~FDI_BC_BIFURCATION_SELECT; 3973 if (enable) 3974 temp |= FDI_BC_BIFURCATION_SELECT; 3975 3976 drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n", 3977 enable ? "en" : "dis"); 3978 intel_de_write(dev_priv, SOUTH_CHICKEN1, temp); 3979 intel_de_posting_read(dev_priv, SOUTH_CHICKEN1); 3980 } 3981 3982 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) 3983 { 3984 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3985 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3986 3987 switch (crtc->pipe) { 3988 case PIPE_A: 3989 break; 3990 case PIPE_B: 3991 if (crtc_state->fdi_lanes > 2) 3992 cpt_set_fdi_bc_bifurcation(dev_priv, false); 3993 else 3994 cpt_set_fdi_bc_bifurcation(dev_priv, true); 3995 3996 break; 3997 case PIPE_C: 3998 cpt_set_fdi_bc_bifurcation(dev_priv, true); 3999 4000 break; 4001 default: 4002 BUG(); 4003 } 4004 } 4005 4006 /* 4007 * Finds the encoder associated with the given CRTC. 
This can only be 4008 * used when we know that the CRTC isn't feeding multiple encoders! 4009 */ 4010 struct intel_encoder * 4011 intel_get_crtc_new_encoder(const struct intel_atomic_state *state, 4012 const struct intel_crtc_state *crtc_state) 4013 { 4014 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4015 const struct drm_connector_state *connector_state; 4016 const struct drm_connector *connector; 4017 struct intel_encoder *encoder = NULL; 4018 int num_encoders = 0; 4019 int i; 4020 4021 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4022 if (connector_state->crtc != &crtc->base) 4023 continue; 4024 4025 encoder = to_intel_encoder(connector_state->best_encoder); 4026 num_encoders++; 4027 } 4028 4029 drm_WARN(encoder->base.dev, num_encoders != 1, 4030 "%d encoders for pipe %c\n", 4031 num_encoders, pipe_name(crtc->pipe)); 4032 4033 return encoder; 4034 } 4035 4036 /* 4037 * Enable PCH resources required for PCH ports: 4038 * - PCH PLLs 4039 * - FDI training & RX/TX 4040 * - update transcoder timings 4041 * - DP transcoding bits 4042 * - transcoder 4043 */ 4044 static void ilk_pch_enable(const struct intel_atomic_state *state, 4045 const struct intel_crtc_state *crtc_state) 4046 { 4047 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4048 struct drm_device *dev = crtc->base.dev; 4049 struct drm_i915_private *dev_priv = to_i915(dev); 4050 enum pipe pipe = crtc->pipe; 4051 u32 temp; 4052 4053 assert_pch_transcoder_disabled(dev_priv, pipe); 4054 4055 if (IS_IVYBRIDGE(dev_priv)) 4056 ivb_update_fdi_bc_bifurcation(crtc_state); 4057 4058 /* Write the TU size bits before fdi link training, so that error 4059 * detection works. 
*/ 4060 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), 4061 intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 4062 4063 /* For PCH output, training FDI link */ 4064 dev_priv->display.fdi_link_train(crtc, crtc_state); 4065 4066 /* We need to program the right clock selection before writing the pixel 4067 * mutliplier into the DPLL. */ 4068 if (HAS_PCH_CPT(dev_priv)) { 4069 u32 sel; 4070 4071 temp = intel_de_read(dev_priv, PCH_DPLL_SEL); 4072 temp |= TRANS_DPLL_ENABLE(pipe); 4073 sel = TRANS_DPLLB_SEL(pipe); 4074 if (crtc_state->shared_dpll == 4075 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) 4076 temp |= sel; 4077 else 4078 temp &= ~sel; 4079 intel_de_write(dev_priv, PCH_DPLL_SEL, temp); 4080 } 4081 4082 /* XXX: pch pll's can be enabled any time before we enable the PCH 4083 * transcoder, and we actually should do this to not upset any PCH 4084 * transcoder that already use the clock when we share it. 4085 * 4086 * Note that enable_shared_dpll tries to do the right thing, but 4087 * get_shared_dpll unconditionally resets the pll - we need that to have 4088 * the right LVDS enable sequence. 
*/ 4089 intel_enable_shared_dpll(crtc_state); 4090 4091 /* set transcoder timing, panel must allow it */ 4092 assert_panel_unlocked(dev_priv, pipe); 4093 ilk_pch_transcoder_set_timings(crtc_state, pipe); 4094 4095 intel_fdi_normal_train(crtc); 4096 4097 /* For PCH DP, enable TRANS_DP_CTL */ 4098 if (HAS_PCH_CPT(dev_priv) && 4099 intel_crtc_has_dp_encoder(crtc_state)) { 4100 const struct drm_display_mode *adjusted_mode = 4101 &crtc_state->hw.adjusted_mode; 4102 u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 4103 i915_reg_t reg = TRANS_DP_CTL(pipe); 4104 enum port port; 4105 4106 temp = intel_de_read(dev_priv, reg); 4107 temp &= ~(TRANS_DP_PORT_SEL_MASK | 4108 TRANS_DP_SYNC_MASK | 4109 TRANS_DP_BPC_MASK); 4110 temp |= TRANS_DP_OUTPUT_ENABLE; 4111 temp |= bpc << 9; /* same format but at 11:9 */ 4112 4113 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 4114 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 4115 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 4116 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 4117 4118 port = intel_get_crtc_new_encoder(state, crtc_state)->port; 4119 drm_WARN_ON(dev, port < PORT_B || port > PORT_D); 4120 temp |= TRANS_DP_PORT_SEL(port); 4121 4122 intel_de_write(dev_priv, reg, temp); 4123 } 4124 4125 ilk_enable_pch_transcoder(crtc_state); 4126 } 4127 4128 void lpt_pch_enable(const struct intel_crtc_state *crtc_state) 4129 { 4130 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4131 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4132 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 4133 4134 assert_pch_transcoder_disabled(dev_priv, PIPE_A); 4135 4136 lpt_program_iclkip(crtc_state); 4137 4138 /* Set transcoder timing. 
*/ 4139 ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); 4140 4141 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 4142 } 4143 4144 static void cpt_verify_modeset(struct drm_i915_private *dev_priv, 4145 enum pipe pipe) 4146 { 4147 i915_reg_t dslreg = PIPEDSL(pipe); 4148 u32 temp; 4149 4150 temp = intel_de_read(dev_priv, dslreg); 4151 udelay(500); 4152 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) { 4153 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) 4154 drm_err(&dev_priv->drm, 4155 "mode set failed: pipe %c stuck\n", 4156 pipe_name(pipe)); 4157 } 4158 } 4159 4160 /* 4161 * The hardware phase 0.0 refers to the center of the pixel. 4162 * We want to start from the top/left edge which is phase 4163 * -0.5. That matches how the hardware calculates the scaling 4164 * factors (from top-left of the first pixel to bottom-right 4165 * of the last pixel, as opposed to the pixel centers). 4166 * 4167 * For 4:2:0 subsampled chroma planes we obviously have to 4168 * adjust that so that the chroma sample position lands in 4169 * the right spot. 4170 * 4171 * Note that for packed YCbCr 4:2:2 formats there is no way to 4172 * control chroma siting. The hardware simply replicates the 4173 * chroma samples for both of the luma samples, and thus we don't 4174 * actually get the expected MPEG2 chroma siting convention :( 4175 * The same behaviour is observed on pre-SKL platforms as well. 
 *
 * Theory behind the formula (note that we ignore sub-pixel
 * source coordinates):
 * s = source sample position
 * d = destination sample position
 *
 * Downscaling 4:1:
 * -0.5
 *  | 0.0
 *  |  |     1.5 (initial phase)
 *  |  |      |
 *  v  v      v
 *  | s | s | s | s |
 *  |           d   |
 *
 * Upscaling 1:4:
 * -0.5
 *  | -0.375 (initial phase)
 *  |     |  0.0
 *  |     |  |
 *  v     v  v
 *  |          s        |
 *  | d | d | d | d |
 */
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
{
	int phase = -0x8000;
	u16 trip = 0;

	if (chroma_cosited)
		phase += (sub - 1) * 0x8000 / sub;

	/* scale is a .16 fixed point factor; center the initial phase */
	phase += scale / (2 * sub);

	/*
	 * Hardware initial phase limited to [-0.5:1.5].
	 * Since the max hardware scale factor is 3.0, we
	 * should never actually exceed 1.0 here.
	 */
	WARN_ON(phase < -0x8000 || phase > 0x18000);

	if (phase < 0)
		phase = 0x10000 + phase;
	else
		trip = PS_PHASE_TRIP;

	return ((phase >> 2) & PS_PHASE_MASK) | trip;
}

/* Scaler source/destination size limits, per platform generation */
#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16

/*
 * Stage an allocation or release of one pipe scaler for the given user
 * (a plane or the CRTC itself), validating size limits. Only crtc_state
 * bookkeeping is touched here; the actual register update happens later
 * in plane/panel-fitter programming.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler bound to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			drm_dbg_kms(&dev_priv->drm,
				    "scaler_user index %u.%u: "
				    "Staged freeing scaler id %d scaler_users = 0x%x\n",
				    intel_crtc->pipe, scaler_user, *scaler_id,
				    scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		drm_dbg_kms(&dev_priv->drm,
			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
			    "size is out of scaler range\n",
			    intel_crtc->pipe, scaler_user, src_w, src_h,
			    dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
		    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		    intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		    scaler_state->scaler_users);

	return 0;
}

/* Stage a scaler update for the CRTC itself (panel fitting). */
static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	int width, height;

	if (crtc_state->pch_pfit.enabled) {
		width =
drm_rect_width(&crtc_state->pch_pfit.dst);
		height = drm_rect_height(&crtc_state->pch_pfit.dst);
	} else {
		width = pipe_mode->crtc_hdisplay;
		height = pipe_mode->crtc_vdisplay;
	}
	return skl_update_scaler(crtc_state, !crtc_state->hw.active,
				 SKL_CRTC_INDEX,
				 &crtc_state->scaler_state.scaler_id,
				 crtc_state->pipe_src_w, crtc_state->pipe_src_h,
				 width, height, NULL, 0,
				 crtc_state->pch_pfit.enabled);
}

/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	/* no fb or invisible plane -> release any scaler we hold */
	bool force_detach = !fb || !plane_state->uapi.visible;
	bool need_scaler = false;

	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
	    fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		need_scaler = true;

	/* src coordinates are .16 fixed point, dst are integer pixels */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->uapi.src) >> 16,
				drm_rect_height(&plane_state->uapi.src) >> 16,
				drm_rect_width(&plane_state->uapi.dst),
				drm_rect_height(&plane_state->uapi.dst),
				fb ? fb->format : NULL,
				fb ? fb->modifier : 0,
				need_scaler);

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] scaling with color key not allowed",
			    intel_plane->base.base.id,
			    intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_XYUV8888:
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
	case DRM_FORMAT_Y210:
	case DRM_FORMAT_Y212:
	case DRM_FORMAT_Y216:
	case DRM_FORMAT_XVYU2101010:
	case DRM_FORMAT_XVYU12_16161616:
	case DRM_FORMAT_XVYU16161616:
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		/* FP16 formats can only be scaled on gen11+ */
		if (INTEL_GEN(dev_priv) >= 11)
			break;
		fallthrough;
	default:
		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			    intel_plane->base.base.id, intel_plane->base.name,
			    fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}

/* Detach all scalers of the CRTC that the old state used. */
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	int i;

	for (i = 0; i < crtc->num_scalers; i++)
		skl_detach_scaler(crtc, i);
}

/* Map a linear coefficient index to its filter tap (0-6, 3 = center). */
static int cnl_coef_tap(int i)
{
	return i % 7;
}

static u16
cnl_nearest_filter_coef(int t)
{
	/* 1.0 on the center tap, 0 elsewhere (SCALER_COEFFICIENT_FORMAT) */
	return t == 3 ? 0x0800 : 0x3000;
}

/*
 * Theory behind setting nearest-neighbor integer scaling:
 *
 * 17 phase of 7 taps requires 119 coefficients in 60 dwords per set.
 * The letter represents the filter tap (D is the center tap) and the number
 * represents the coefficient set for a phase (0-16).
 *
 * +------------+------------------------+------------------------+
 * |Index value | Data value coefficient 1 | Data value coefficient 2 |
 * +------------+------------------------+------------------------+
 * |    00h     |          B0            |          A0            |
 * +------------+------------------------+------------------------+
 * |    01h     |          D0            |          C0            |
 * +------------+------------------------+------------------------+
 * |    02h     |          F0            |          E0            |
 * +------------+------------------------+------------------------+
 * |    03h     |          A1            |          G0            |
 * +------------+------------------------+------------------------+
 * |    04h     |          C1            |          B1            |
 * +------------+------------------------+------------------------+
 * |    ...     |          ...           |          ...           |
 * +------------+------------------------+------------------------+
 * |    38h     |          B16           |          A16           |
 * +------------+------------------------+------------------------+
 * |    39h     |          D16           |          C16           |
 * +------------+------------------------+------------------------+
 * |    3Ah     |          F16           |          E16           |
 * +------------+------------------------+------------------------+
 * |    3Bh     |        Reserved        |          G16           |
 * +------------+------------------------+------------------------+
 *
 * To enable nearest-neighbor scaling: program scaler coefficients with
 * the center tap (Dxx) values set to 1 and all other values set to 0 as per
 * SCALER_COEFFICIENT_FORMAT
 *
 */

static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
					     enum pipe pipe, int id, int set)
{
	int i;

	/* auto-increment the index so we can stream all 60 data dwords */
	intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set),
			  PS_COEE_INDEX_AUTO_INC);

	/* two coefficients per data register write */
	for (i = 0; i < 17 * 7; i += 2) {
		u32 tmp;
		int t;

		t = cnl_coef_tap(i);
		tmp = cnl_nearest_filter_coef(t);

		t = cnl_coef_tap(i + 1);
		tmp |= cnl_nearest_filter_coef(t) << 16;

		intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set),
				  tmp);
	}

	intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
}

/*
 * Translate the uapi scaling filter into the PS_CTRL filter-select bits;
 * anything other than nearest-neighbor uses the default medium filter.
 */
u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
{
	if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
		return (PS_FILTER_PROGRAMMED |
			PS_Y_VERT_FILTER_SELECT(set) |
			PS_Y_HORZ_FILTER_SELECT(set) |
			PS_UV_VERT_FILTER_SELECT(set) |
			PS_UV_HORZ_FILTER_SELECT(set));
	}

	return PS_FILTER_MEDIUM;
}

/* Program the coefficient set needed for the requested scaling filter. */
void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
			     int id, int set, enum drm_scaling_filter filter)
{
	switch (filter) {
	case DRM_SCALING_FILTER_DEFAULT:
		break;
	case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
		cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set);
		break;
	default:
		MISSING_CASE(filter);
	}
}

/* Enable the pipe scaler used as a panel fitter on SKL+. */
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	/* source is the full pipe, in .16 fixed point */
	struct drm_rect src = {
		.x2 = crtc_state->pipe_src_w << 16,
		.y2 = crtc_state->pipe_src_h << 16,
	};
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	u16 uv_rgb_hphase, uv_rgb_vphase;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;
	int hscale, vscale;
	unsigned long irqflags;
	int id;
	u32 ps_ctrl;

	if (!crtc_state->pch_pfit.enabled)
		return;

	if (drm_WARN_ON(&dev_priv->drm,
			crtc_state->scaler_state.scaler_id < 0))
		return;

	hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
	vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);

	uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
	uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

	id = scaler_state->scaler_id;

	ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
	ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	skl_scaler_setup_filter(dev_priv, pipe, id, 0,
				crtc_state->hw.scaling_filter);

	intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);

	intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
	intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
			  x << 16 | y);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
			  width << 16 | height);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/* Enable the ILK-style panel fitter for the CRTC, if requested. */
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}

void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		/* BDW enables IPS through the pcode mailbox */
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}

void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}

/* Turn off the legacy overlay, if any, before the pipe goes down. */
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
	if (intel_crtc->overlay)
		(void) intel_overlay_switch_off(intel_crtc->overlay);

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

/* Decide whether IPS must be turned off before the plane update. */
static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
				       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->ips_enabled)
		return false;

	if (intel_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 *
	 * Disable IPS before we program the LUT.
	 */
	if (IS_HASWELL(dev_priv) &&
	    (new_crtc_state->uapi.color_mgmt_changed ||
	     new_crtc_state->update_pipe) &&
	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
		return true;

	return !new_crtc_state->ips_enabled;
}

/* Decide whether IPS must be (re-)enabled after the plane update. */
static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
				       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!new_crtc_state->ips_enabled)
		return false;

	if (intel_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 *
	 * Re-enable IPS after the LUT has been programmed.
	 */
	if (IS_HASWELL(dev_priv) &&
	    (new_crtc_state->uapi.color_mgmt_changed ||
	     new_crtc_state->update_pipe) &&
	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
		return true;

	/*
	 * We can't read out IPS on broadwell, assume the worst and
	 * forcibly enable IPS on the first fastset.
	 */
	if (new_crtc_state->update_pipe && old_crtc_state->inherited)
		return true;

	return !old_crtc_state->ips_enabled;
}

/* Does this crtc state require the WA Display #0827 workaround? */
static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
		return true;

	return false;
}

/* Does this crtc state require the scaler clock gating workaround? */
static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
		return true;

	return false;
}

/* True when the update turns planes on (none active -> some active). */
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
		new_crtc_state->active_planes;
}

/* True when the update turns planes off (some active -> none active). */
static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->active_planes &&
		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
}

static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* workarounds are released only when no longer needed */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}

/* Enable flip-done interrupt handling on all updated planes of the crtc. */
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->enable_flip_done &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

/* Disable flip-done interrupt handling on all updated planes of the crtc. */
static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->disable_flip_done &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}

/*
 * Clear the async-flip bit (by re-writing the old plane state without it)
 * for planes whose async address update enable is double buffered, then
 * wait a vblank for the change to latch.
 */
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_wait_for_vblank(i915, crtc->pipe);
}

static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

/* Disable every plane of the crtc covered by the update mask. */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_disable_plane(plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

intel_frontbuffer_flip(dev_priv, fb_bits);
}

/*
 * intel_connector_primary_encoder - get the primary encoder for a connector
 * @connector: connector for which to return the encoder
 *
 * Returns the primary encoder for a connector. There is a 1:1 mapping from
 * all connectors to their encoder, except for DP-MST connectors which have
 * both a virtual and a primary encoder. These DP-MST primary encoders can be
 * pointed to by as many DP-MST connectors as there are pipes.
 */
static struct intel_encoder *
intel_connector_primary_encoder(struct intel_connector *connector)
{
	struct intel_encoder *encoder;

	/* DP-MST: the primary encoder lives on the digital port, not the connector */
	if (connector->mst_port)
		return &dp_to_dig_port(connector->mst_port)->base;

	encoder = intel_attached_encoder(connector);
	drm_WARN_ON(connector->base.dev, !encoder);

	return encoder;
}

/*
 * Invoke the optional ->update_prepare() hook on the primary encoder of
 * every connector in @state that needs a modeset. The crtc argument passed
 * to the hook may be NULL if the connector is being disabled (no new crtc).
 */
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
					i) {
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_prepare)
			continue;

		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_prepare(state, encoder, crtc);
	}
}

/*
 * Counterpart of intel_encoders_update_prepare(): invoke the optional
 * ->update_complete() hook on the primary encoder of every connector in
 * @state that needed a modeset.
 */
static void intel_encoders_update_complete(struct intel_atomic_state *state)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
					i) {
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_complete)
			continue;

		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_complete(state, encoder, crtc);
	}
}

/* Call the optional ->pre_pll_enable() hook on every encoder feeding @crtc. */
static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}

/* Call the optional ->pre_enable() hook on every encoder feeding @crtc. */
static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}

/*
 * Call the optional ->enable() hook on every encoder feeding @crtc and
 * notify the opregion that each encoder is now active.
 */
static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		/* notified even if the encoder has no ->enable hook */
		intel_opregion_notify_encoder(encoder, true);
	}
}

/*
 * Call the optional ->disable() hook on every encoder that was feeding
 * @crtc in the old state, notifying the opregion first.
 */
static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
	}
}

/* Call the optional ->post_disable() hook on every encoder that fed @crtc. */
static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}

/* Call the optional ->post_pll_disable() hook on every encoder that fed @crtc. */
static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}

/* Call the optional ->update_pipe() hook on every encoder feeding @crtc. */
static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
	}
}

/* Disable the primary plane of the pipe described by @crtc_state. */
static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane =
to_intel_plane(crtc->base.primary);

	plane->disable_plane(plane, crtc_state);
}

/*
 * Full modeset enable sequence for ILK-style (PCH) pipes. The ordering of
 * the steps below (DPLL prepare, timings, pipeconf, encoder pre-enable,
 * FDI PLL, pfit, LUT, watermarks, pipe enable, PCH, vblank, encoders)
 * must not be rearranged without consulting the comments inline.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* enabling an already-active crtc is a driver bug */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* re-arm underrun reporting now that the sequence is complete */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/* IPS only exists on ULT machines and is tied to pipe A.
*/
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}

/*
 * Set (@apply == true) or clear the pipe scaler clock gating disable bits
 * for @pipe; used as a workaround around pipe enable (see the Display WA
 * #1180 comment at the call site).
 */
static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
					    enum pipe pipe, bool apply)
{
	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;

	if (apply)
		val |= mask;
	else
		val &= ~mask;

	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
}

/* Program the MBus DBox credits for @crtc's pipe. */
static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = MBUS_DBOX_A_CREDIT(2);

	/* gen12+ uses different B/BW credit values than gen11 */
	if (INTEL_GEN(dev_priv) >= 12) {
		val |= MBUS_DBOX_BW_CREDIT(2);
		val |= MBUS_DBOX_B_CREDIT(12);
	} else {
		val |= MBUS_DBOX_BW_CREDIT(1);
		val |= MBUS_DBOX_B_CREDIT(8);
	}

	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
}

/* Write the precomputed pipe/IPS linetime watermarks for @crtc_state's pipe. */
static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
		       HSW_LINETIME(crtc_state->linetime) |
		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
}

/*
 * Program the transcoder frame start delay from the value cached in
 * dev_priv->framestart_delay (note the -1 bias applied to the register field).
 */
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
	u32 val;

	val = intel_de_read(dev_priv, reg);
	val &= ~HSW_FRAME_START_DELAY_MASK;
	val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, reg, val);
}

/*
 * Bigjoiner pre-enable: enable DSC on the master (skipped in the normal
 * pre-enable path) and, for a slave crtc, run the master's pre-pll/pll/
 * pre-enable steps plus the slave's DSC.
 */
static void
icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_crtc_state *master_crtc_state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct intel_encoder *encoder = NULL;
	int i;

	/* a slave crtc is driven through its linked master crtc */
	if (crtc_state->bigjoiner_slave)
		master = crtc_state->bigjoiner_linked_crtc;

	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);

	/* find the encoder feeding the master crtc */
	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc != &master->base)
			continue;

		encoder = to_intel_encoder(conn_state->best_encoder);
		break;
	}

	if (!crtc_state->bigjoiner_slave) {
		/* need to enable VDSC, which we skipped in pre-enable */
		intel_dsc_enable(encoder, crtc_state);
	} else {
		/*
		 * Enable sequence steps 1-7 on bigjoiner master
		 */
		intel_encoders_pre_pll_enable(state, master);
		intel_enable_shared_dpll(master_crtc_state);
		intel_encoders_pre_enable(state, master);

		/* and DSC on slave */
		intel_dsc_enable(NULL, crtc_state);
	}
}

/*
 * Full modeset enable sequence for HSW+ (DDI) pipes. The ordering of the
 * steps (PLL, encoder pre-enable, transcoder timings, pipeconf, pfit, LUT,
 * watermarks, MBus, encoder enable, workaround vblank waits) follows the
 * hardware-mandated sequence; do not reorder without checking the inline
 * workaround comments.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* enabling an already-active crtc is a driver bug */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		/* bigjoiner handles the PLL/pre-enable steps itself */
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_set_pipe_src_size(new_crtc_state);
	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
		intel_set_transcoder_timings(new_crtc_state);

		if (cpu_transcoder != TRANSCODER_EDP)
			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
				       new_crtc_state->pixel_multiplier - 1);

		if (new_crtc_state->has_pch_encoder)
			intel_cpu_transcoder_set_m_n(new_crtc_state,
						     &new_crtc_state->fdi_m_n, NULL);

		hsw_set_frame_start_delay(new_crtc_state);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		hsw_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	if (new_crtc_state->bigjoiner_slave) {
		trace_intel_pipe_enable(crtc);
		intel_crtc_vblank_on(new_crtc_state);
	}

	intel_encoders_enable(state, crtc);

	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}

/* Disable the ILK-style panel fitter, but only if it was in use. */
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	intel_de_write(dev_priv, PF_CTL(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}

/*
 * Full modeset disable sequence for ILK-style (PCH) pipes; the reverse of
 * ilk_crtc_enable(), including PCH transcoder/FDI teardown.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
*/
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* re-arm underrun reporting now that everything is off */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/* Disable sequence for HSW+ (DDI) pipes: only the encoder hooks here. */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}

/* Program the GMCH panel fitter, if @crtc_state uses it. */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}

/* Check whether @phy is a combo PHY on this platform (range is per-platform). */
bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (phy == PHY_NONE)
		return false;
	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
		return phy <= PHY_D;
	else if (IS_JSL_EHL(dev_priv))
		return phy <= PHY_C;
	else if (INTEL_GEN(dev_priv) >= 11)
		return phy <= PHY_B;
	else
		return false;
}

/* Check whether @phy is a Type-C PHY on this platform (range is per-platform). */
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
		return false;
	else if (INTEL_GEN(dev_priv) >= 12)
		return phy >= PHY_D && phy <= PHY_I;
	else if (INTEL_GEN(dev_priv) >= 11 && !IS_JSL_EHL(dev_priv))
		return phy >= PHY_C && phy <= PHY_F;
	else
		return false;
}

/* Map a DDI port to the PHY driving it, accounting for platform quirks. */
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
	if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
return PHY_C + port - PORT_TC1;
	else if (IS_JSL_EHL(i915) && port == PORT_D)
		return PHY_A;

	return PHY_A + port - PORT_A;
}

/*
 * Map a DDI port to its Type-C port index, or TC_PORT_NONE if the port is
 * not driven by a Type-C PHY on this platform.
 */
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
		return TC_PORT_NONE;

	if (INTEL_GEN(dev_priv) >= 12)
		return TC_PORT_1 + port - PORT_TC1;
	else
		return TC_PORT_1 + port - PORT_C;
}

/* Map a DDI port to its lane power domain. */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

/*
 * Map a digital port's AUX channel to its power domain, using the TBT
 * variants when the port is a Type-C port in TBT-alt mode.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}

/*
 * Converts aux_ch to power_domain without caring about TBT ports for that use
 * intel_aux_power_domain()
 */
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
	switch (aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	case AUX_CH_H:
		return POWER_DOMAIN_AUX_H;
	case AUX_CH_I:
		return POWER_DOMAIN_AUX_I;
	default:
		MISSING_CASE(aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}

/*
 * Build the bitmask of power domains needed by @crtc_state: pipe,
 * transcoder, panel fitter, each enabled encoder's domain, plus audio,
 * DPLL and DSC domains as applicable. Returns 0 for an inactive crtc.
 */
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->hw.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |=
BIT_ULL(POWER_DOMAIN_AUDIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

	if (crtc_state->dsc.compression_enable)
		mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));

	return mask;
}

/*
 * Grab references for all power domains newly needed by @crtc_state and
 * record them in crtc->enabled_power_domains. Returns the mask of domains
 * that are no longer needed, to be released by the caller via
 * modeset_put_crtc_power_domains() at the appropriate point.
 */
static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	u64 domains, new_domains, old_domains;

	domains = get_crtc_power_domains(crtc_state);

	new_domains = domains & ~crtc->enabled_power_domains.mask;
	old_domains = crtc->enabled_power_domains.mask & ~domains;

	for_each_power_domain(domain, new_domains)
		intel_display_power_get_in_set(dev_priv,
					       &crtc->enabled_power_domains,
					       domain);

	return old_domains;
}

/* Drop the power domain references in @domains held by @crtc. */
static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
					   u64 domains)
{
	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
					    &crtc->enabled_power_domains,
					    domains);
}

/*
 * Full modeset enable sequence for VLV/CHV pipes; note the PLL is enabled
 * between the encoder pre_pll_enable and pre_enable hooks.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* enabling an already-active crtc is a driver bug */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/* NOTE(review): called unconditionally here, unlike the i9xx/ilk paths */
	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}

/* Write the precomputed FP0/FP1 PLL divider values for @crtc_state's pipe. */
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, FP0(crtc->pipe),
		       crtc_state->dpll_hw_state.fp0);
	intel_de_write(dev_priv, FP1(crtc->pipe),
		       crtc_state->dpll_hw_state.fp1);
}

/* Full modeset enable sequence for gen2-4 (GMCH) pipes. */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* enabling an already-active crtc is a driver bug */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* gen2 has no usable underrun reporting (see the gen2 comments below) */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);
}

/* Disable the GMCH panel fitter, if it was in use. */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	/* pfit may only be touched while the pipe is disabled */
	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}

/* Full modeset disable sequence for gen2-4 (GMCH) and VLV/CHV pipes. */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI PLLs are managed by the DSI encoder itself */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}

/*
 * Force a crtc off outside of an atomic commit (e.g. during hw state
 * sanitization at boot): disable its planes and hardware, then scrub the
 * software state (crtc/plane/uapi state, watermark/cdclk/dbuf/bw tracking,
 * power domain references) to match.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	if (!crtc_state->hw.active)
		return;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/* a throwaway atomic state is needed to drive the crtc_disable hook */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	/* scrub this pipe from the global cdclk/dbuf/bw bookkeeping */
	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}

/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a
 * call to intel_modeset_setup_hw_state.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_helper_suspend(dev);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		/* kept so resume can restore the exact pre-suspend config */
		dev_priv->modeset_restore_state = state;
	return ret;
}

/* Generic encoder destructor: cleanup the drm core object, free the wrapper. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
				"connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST encoders are bound to a crtc, not to a connector,
		 * hence the encoder checks below don't apply */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
				"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
				"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
				"best encoder set without crtc!\n");
	}
}

/* Can IPS (Intermediate Pixel Storage) ever be used with this crtc state? */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	/* Respect the module parameter / user override */
	if (!dev_priv->params.enable_ips)
		return false;

	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}

/* Decide whether IPS will actually be enabled for this crtc state. */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled.
	 */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}

static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return INTEL_GEN(dev_priv) < 4 &&
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}

/*
 * Effective pipe pixel rate, accounting for pch panel fitter downscaling:
 * when the pfit output window is smaller than the pipe source, the pipe
 * has to process pixels proportionally faster.
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
	unsigned int pipe_w, pipe_h, pfit_w, pfit_h;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (!crtc_state->pch_pfit.enabled)
		return pixel_rate;

	pipe_w = crtc_state->pipe_src_w;
	pipe_h = crtc_state->pipe_src_h;

	pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
	pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);

	/* Only downscaling increases the rate; clamp for upscaling */
	if (pipe_w < pfit_w)
		pipe_w = pfit_w;
	if (pipe_h < pfit_h)
		pipe_h = pfit_h;

	if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
			!pfit_w || !pfit_h))
		return pixel_rate;

	return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
		       pfit_w * pfit_h);
}

/*
 * Populate the user-visible mode fields from the crtc_* (hardware) timing
 * fields of @timings. @mode and @timings may alias (callers pass the same
 * struct for an in-place conversion).
 */
static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
					 const struct drm_display_mode *timings)
{
	mode->hdisplay = timings->crtc_hdisplay;
	mode->htotal = timings->crtc_htotal;
	mode->hsync_start = timings->crtc_hsync_start;
	mode->hsync_end = timings->crtc_hsync_end;

	mode->vdisplay = timings->crtc_vdisplay;
	mode->vtotal = timings->crtc_vtotal;
	mode->vsync_start = timings->crtc_vsync_start;
	mode->vsync_end = timings->crtc_vsync_end;

	mode->flags = timings->flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = timings->crtc_clock;

	drm_mode_set_name(mode);
}

static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (HAS_GMCH(dev_priv))
		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
		crtc_state->pixel_rate =
			crtc_state->hw.pipe_mode.crtc_clock;
	else
		crtc_state->pixel_rate =
			ilk_pipe_pixel_rate(crtc_state);
}

/*
 * Derive hw.mode/hw.pipe_mode/pixel_rate from the adjusted_mode that was
 * just read out of the hardware.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
	intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);

	intel_crtc_compute_pixel_rate(crtc_state);

	drm_mode_copy(mode, adjusted_mode);
	/* bigjoiner means two pipes, so double the user-visible width */
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}

static void intel_encoder_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}

/*
 * Validate and adjust the crtc configuration during atomic check:
 * derives pipe_mode, decides on double wide usage, and rejects timings
 * the hardware can't handle.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}

/* Halve both terms until they fit the M/N register field width. */
static void
intel_reduce_m_n_ratio(u32 *num, u32 *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK)	{
		*num >>= 1;
		*den >>= 1;
	}
}

static void compute_m_n(unsigned int m, unsigned int n,
			u32 *ret_m, u32 *ret_n,
			bool constant_n)
{
	/*
	 * Several DP dongles in particular seem to be fussy about
	 * too large link M/N values. Give N value as 0x8000 that
	 * should be acceptable by specific devices. 0x8000 is the
	 * specified fixed N value for asynchronous clock mode,
	 * which the devices expect also in synchronous clock mode.
	 */
	if (constant_n)
		*ret_n = DP_LINK_CONSTANT_N_VALUE;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	/* Scale M by the same factor so the m/n ratio is preserved */
	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

/*
 * Compute the data (gmch) and link M/N values for a DP/FDI link:
 * data M/N = (bpp * pixel clock) / (lane count * link clock * 8),
 * link M/N = pixel clock / link clock.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	/* FEC adds its own overhead to the effective data clock */
	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}

static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid
	 * unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}

static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
				 pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}

/* Program the M/N values into the PCH transcoder registers. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}

/* Does this transcoder have the second (DRRS) set of M/N registers? */
static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
				 enum transcoder transcoder)
{
	if (IS_HASWELL(dev_priv))
		return transcoder == TRANSCODER_EDP;

	/*
	 * Strictly speaking some registers are available before
	 * gen7, but we only support DRRS on gen7+
	 */
	return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
}

/* Program the M/N (and optionally M2/N2) values for a CPU transcoder. */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 * M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		/* gen4 and earlier use per-pipe, not per-transcoder, registers */
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}

/* Select and program the M/N divider set (M1_N1 or M2_N2) for DP. */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		drm_err(&i915->drm, "Unsupported divider value\n");
		return;
	}

	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}

/*
 * Program the VLV DPLL dividers and analog tuning values through the
 * sideband (DPIO) interface, prior to actually enabling the PLL.
 * The magic constants follow the "eDP HDMI DPIO driver vbios notes" doc
 * referenced below.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
			       0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
			       0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df40000);
	}

	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}

/*
 * CHV counterpart of vlv_prepare_pll: program the PLL dividers, lock
 * detect threshold and loop filter through DPIO before PLL enable.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
		       5 << DPIO_CHV_S1_DIV_SHIFT |
		       bestp1 << DPIO_CHV_P1_DIV_SHIFT |
		       bestp2 << DPIO_CHV_P2_DIV_SHIFT |
		       1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
		       DPIO_CHV_M1_DIV_BY_2 |
		       1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
		       vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
		       DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}

/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config.
 * To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
		     const struct dpll *dpll)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_crtc_state *pipe_config;

	/* Temporary state, only used to drive the compute/prepare/enable hooks */
	pipe_config = intel_crtc_state_alloc(crtc);
	if (!pipe_config)
		return -ENOMEM;

	pipe_config->cpu_transcoder = (enum transcoder)pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll = *dpll;

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_compute_dpll(crtc, pipe_config);
		chv_prepare_pll(crtc, pipe_config);
		chv_enable_pll(crtc, pipe_config);
	} else {
		vlv_compute_dpll(crtc, pipe_config);
		vlv_prepare_pll(crtc, pipe_config);
		vlv_enable_pll(crtc, pipe_config);
	}

	kfree(pipe_config);

	return 0;
}

/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. To be used in cases where we need
 * the PLL enabled even when @pipe is not going to be enabled.
 */
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	if (IS_CHERRYVIEW(dev_priv))
		chv_disable_pll(dev_priv, pipe);
	else
		vlv_disable_pll(dev_priv, pipe);
}



/* Write the h/v timing registers for the crtc's cpu transcoder. */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+ */
	if (INTEL_GEN(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	/* Registers hold value-1; low 16 bits = start, high 16 bits = end */
	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}

static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	intel_de_write(dev_priv, PIPESRC(pipe),
		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
}

/* Read back whether the hw has this pipe configured as interlaced. */
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* gen2 has no interlace support */
	if (IS_GEN(dev_priv, 2))
		return false;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
	else
		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
}

/*
 * Read the transcoder timing registers back into adjusted_mode.
 * Inverse of intel_set_transcoder_timings: registers hold value-1,
 * low 16 bits = start/display, high 16 bits = end/total.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	/* DSI transcoders have no blanking registers */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
			(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
			((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
			(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
			((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		/* undo the halfline adjustment applied when programming */
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}

static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
}

/* Build and write the PIPECONF value for GMCH-style (pre-ILK) pipes. */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}

/* Does this platform have the GMCH panel fitter? */
static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return INTEL_GEN(dev_priv) >= 4 ||
		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/* Read out the GMCH panel fitter state, if it is enabled and ours. */
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe.
*/ 7169 if (INTEL_GEN(dev_priv) < 4) { 7170 if (crtc->pipe != PIPE_B) 7171 return; 7172 } else { 7173 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 7174 return; 7175 } 7176 7177 crtc_state->gmch_pfit.control = tmp; 7178 crtc_state->gmch_pfit.pgm_ratios = 7179 intel_de_read(dev_priv, PFIT_PGM_RATIOS); 7180 } 7181 7182 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 7183 struct intel_crtc_state *pipe_config) 7184 { 7185 struct drm_device *dev = crtc->base.dev; 7186 struct drm_i915_private *dev_priv = to_i915(dev); 7187 enum pipe pipe = crtc->pipe; 7188 struct dpll clock; 7189 u32 mdiv; 7190 int refclk = 100000; 7191 7192 /* In case of DSI, DPLL will not be used */ 7193 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7194 return; 7195 7196 vlv_dpio_get(dev_priv); 7197 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 7198 vlv_dpio_put(dev_priv); 7199 7200 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 7201 clock.m2 = mdiv & DPIO_M2DIV_MASK; 7202 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 7203 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 7204 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 7205 7206 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 7207 } 7208 7209 static void 7210 i9xx_get_initial_plane_config(struct intel_crtc *crtc, 7211 struct intel_initial_plane_config *plane_config) 7212 { 7213 struct drm_device *dev = crtc->base.dev; 7214 struct drm_i915_private *dev_priv = to_i915(dev); 7215 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 7216 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 7217 enum pipe pipe; 7218 u32 val, base, offset; 7219 int fourcc, pixel_format; 7220 unsigned int aligned_height; 7221 struct drm_framebuffer *fb; 7222 struct intel_framebuffer *intel_fb; 7223 7224 if (!plane->get_hw_state(plane, &pipe)) 7225 return; 7226 7227 drm_WARN_ON(dev, pipe != crtc->pipe); 7228 7229 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 7230 if (!intel_fb) { 7231 drm_dbg_kms(&dev_priv->drm, 
"failed to alloc fb\n"); 7232 return; 7233 } 7234 7235 fb = &intel_fb->base; 7236 7237 fb->dev = dev; 7238 7239 val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); 7240 7241 if (INTEL_GEN(dev_priv) >= 4) { 7242 if (val & DISPPLANE_TILED) { 7243 plane_config->tiling = I915_TILING_X; 7244 fb->modifier = I915_FORMAT_MOD_X_TILED; 7245 } 7246 7247 if (val & DISPPLANE_ROTATE_180) 7248 plane_config->rotation = DRM_MODE_ROTATE_180; 7249 } 7250 7251 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && 7252 val & DISPPLANE_MIRROR) 7253 plane_config->rotation |= DRM_MODE_REFLECT_X; 7254 7255 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 7256 fourcc = i9xx_format_to_fourcc(pixel_format); 7257 fb->format = drm_format_info(fourcc); 7258 7259 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 7260 offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane)); 7261 base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; 7262 } else if (INTEL_GEN(dev_priv) >= 4) { 7263 if (plane_config->tiling) 7264 offset = intel_de_read(dev_priv, 7265 DSPTILEOFF(i9xx_plane)); 7266 else 7267 offset = intel_de_read(dev_priv, 7268 DSPLINOFF(i9xx_plane)); 7269 base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; 7270 } else { 7271 base = intel_de_read(dev_priv, DSPADDR(i9xx_plane)); 7272 } 7273 plane_config->base = base; 7274 7275 val = intel_de_read(dev_priv, PIPESRC(pipe)); 7276 fb->width = ((val >> 16) & 0xfff) + 1; 7277 fb->height = ((val >> 0) & 0xfff) + 1; 7278 7279 val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane)); 7280 fb->pitches[0] = val & 0xffffffc0; 7281 7282 aligned_height = intel_fb_align_height(fb, 0, fb->height); 7283 7284 plane_config->size = fb->pitches[0] * aligned_height; 7285 7286 drm_dbg_kms(&dev_priv->drm, 7287 "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 7288 crtc->base.name, plane->base.name, fb->width, fb->height, 7289 fb->format->cpp[0] * 8, base, fb->pitches[0], 7290 plane_config->size); 7291 7292 plane_config->fb = intel_fb; 7293 } 

/*
 * Read the CHV DPLL dividers via the DPIO sideband and compute the
 * resulting port clock (100 MHz reference). m2 is a 22.22 style value:
 * integer part from PLL_DW0, fractional part from PLL_DW2 when the
 * fractional divider is enabled.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}

/* Decode the output format (RGB / YCbCr 4:4:4 / 4:2:0) from PIPEMISC. */
static enum intel_output_format
bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	if (tmp & PIPEMISC_YUV420_ENABLE) {
		/* We support 4:2:0 in full blend mode only */
		drm_WARN_ON(&dev_priv->drm,
			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);

		return INTEL_OUTPUT_FORMAT_YCBCR420;
	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	} else {
		return INTEL_OUTPUT_FORMAT_RGB;
	}
}

/*
 * Read the gamma/CSC enable state for the pipe back from the primary
 * plane's DSPCNTR register into @crtc_state.
 */
static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 tmp;

	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	if (tmp & DISPPLANE_GAMMA_ENABLE)
		crtc_state->gamma_enable = true;

	if (!HAS_GMCH(dev_priv) &&
	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
		crtc_state->csc_enable = true;
}

/*
 * Read back the full pipe configuration for GMCH-style (pre-ILK, VLV,
 * CHV) platforms. Takes a power-domain wakeref for the pipe and drops
 * it before returning; returns false if the power well is down or the
 * pipe is disabled.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

/*
 * Configure the IBX/CPT PCH display reference clock (PCH_DREF_CONTROL)
 * based on which panel outputs are present and whether SSC can be used.
 * The register is reprogrammed source by source, with settle delays,
 * rather than in one write.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Nothing to do if the hardware already matches the wanted state */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The stepwise updates must have converged on the precomputed state */
	BUG_ON(val != final);
}

/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2, waiting (up to 100us)
 * for the status bit on both assert and de-assert.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}

/* Map a bend value in steps of 5 (-50..50) to an sscdivintphase[] index */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
		return;

	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/* dither phase is only needed for odd multiples of 5 steps */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX

/*
 * Is the enabled SPLL currently running off the PCH SSC reference?
 * Checked via SPLL_CTL and the CPU SSC fuse strap.
 */
static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);

	if ((ctl & SPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	if (IS_BROADWELL(dev_priv) &&
	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
		return true;

	return false;
}

/*
 * Is the enabled WRPLL @id currently running off the PCH SSC reference?
 * Checked via WRPLL_CTL and the CPU SSC fuse strap.
 */
static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
			       enum intel_dpll_id id)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));

	if ((ctl & WRPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
		return true;

	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	return false;
}

/*
 * Configure the LPT CLKOUT_DP reference: record which PLLs the BIOS
 * left running off the PCH SSC reference, and only reprogram CLKOUT_DP
 * when nothing depends on that reference anymore.
 */
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	dev_priv->pch_ssc_use = 0;

	if (spll_uses_pch_ssc(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	if (dev_priv->pch_ssc_use)
		return;

	if (has_fdi) {
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ilk_init_pch_refclk(dev_priv);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}

/*
 * Program PIPECONF for ILK-style PCH platforms from the committed
 * crtc state: bpc, dither, interlace, color range, gamma mode and
 * frame start delay.
 */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/*
	 * NOTE(review): SDVO outputs are deliberately excluded from the
	 * pipe-level color range selection — presumably range handling for
	 * SDVO happens elsewhere; confirm against the SDVO encoder code.
	 */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	/* read back to flush the posted write */
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}

/*
 * Program PIPECONF for HSW+ platforms. Only dithering, interlace mode and
 * (on HSW itself) the YUV colorspace bit live here; everything else has
 * moved to other registers on these platforms.
 */
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/* dither bits exist in PIPECONF only on HSW itself */
	if (IS_HASWELL(dev_priv) && crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (IS_HASWELL(dev_priv) &&
	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;

	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
	/* read back to flush the posted write */
	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
}

/*
 * Program PIPEMISC (BDW+): dithering depth/enable, YUV output and 420
 * blending modes, HDR precision and pixel rounding.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	/* pipe_bpp counts all three channels: 18 == 6 bpc, 24 == 8 bpc, ... */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * HDR precision mode only when all active planes (cursor aside)
	 * are HDR-capable on gen11+.
	 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (INTEL_GEN(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}

/*
 * Read back the pipe bpp from the PIPEMISC dither field.
 * Returns bits per pixel over all three channels (6 bpc -> 18, etc.),
 * or 0 for an unrecognised register value.
 */
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
	case PIPEMISC_DITHER_6_BPC:
		return 18;
	case PIPEMISC_DITHER_8_BPC:
		return 24;
	case PIPEMISC_DITHER_10_BPC:
		return 30;
	case PIPEMISC_DITHER_12_BPC:
		return 36;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}

/*
 * Compute the number of FDI lanes needed for the given dotclock, per-lane
 * link bandwidth and pipe bpp.
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

/*
 * Read back the link M/N values from the PCH transcoder registers.
 * The TU size shares a register with DATA_M1, hence the masking below.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
	/* TU size is stored off-by-one in the register */
	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}

/*
 * Read back the link M/N (and, where present, the alternate M2/N2) values
 * from the CPU transcoder. Pre-gen5 uses the per-pipe G4X registers and has
 * no M2/N2 set.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		/* only read M2/N2 where the transcoder actually has them */
		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n = intel_de_read(dev_priv,
						      PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = intel_de_read(dev_priv,
						      PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = intel_de_read(dev_priv,
						      PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}

/*
 * Read back the DP M/N values, choosing the PCH or CPU transcoder register
 * set depending on where the encoder sits.
 */
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n,
					     &pipe_config->dp_m2_n2);
}

/* Read back the FDI M/N configuration from the CPU transcoder. */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}

/*
 * Decode a panel fitter window position/size register pair (x/y packed in
 * the high/low 16 bits) into the pch_pfit destination rect.
 */
static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
				  u32 pos, u32 size)
{
	drm_rect_init(&crtc_state->pch_pfit.dst,
		      pos >> 16, pos & 0xffff,
		      size >> 16, size & 0xffff);
}

/*
 * Read back the panel fitter (pipe scaler) state on SKL+: find the scaler
 * bound to this pipe, record its window, and update the scaler bookkeeping.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		/* enabled and bound to the pipe, not to a plane */
		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv,
				    SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}

/*
 * Reconstruct the framebuffer the firmware/BIOS left behind on the primary
 * plane of @crtc (SKL+ register layout). On success, plane_config->fb points
 * at a freshly allocated intel_framebuffer describing it; on any
 * unsupported/unreadable configuration the function returns with
 * plane_config untouched or partially filled and no fb.
 */
static void
skl_get_initial_plane_config(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	if (crtc_state->bigjoiner) {
		drm_dbg_kms(&dev_priv->drm,
			    "Unsupported bigjoiner configuration for initial FB\n");
		return;
	}

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));

	/* the format field layout changed on gen11 */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* alpha mode moved from PLANE_CTL to PLANE_COLOR_CTL on GLK/gen10+ */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = intel_de_read(dev_priv,
				      PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* 90/270 degree rotation would require extra work */
	if (drm_rotation_90_or_270(plane_config->rotation))
		goto error;

	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores height-1/width-1 in its high/low halves */
	val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}

/*
 * Read back the ILK-style panel fitter state for this pipe, if enabled.
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl =
intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}

/*
 * Read back the full pipe configuration on ILK-class (PCH) hardware.
 * Takes the pipe power domain for the duration of the readout; returns
 * true iff the pipe is enabled and the state was read.
 */
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* pipe->transcoder mapping is fixed on these platforms */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;
		bool pll_active;

		pipe_config->has_pch_encoder = true;

		/* FDI lane count is stored off-by-one in the register */
		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			/* CPT: look up which PLL the transcoder is using */
			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id = DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		pll_active = intel_dpll_get_hw_state(dev_priv, pll,
						     &pipe_config->dpll_hw_state);
		drm_WARN_ON(dev, !pll_active);

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ilk_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

/*
 * Read back which DPLL drives @port on DG1 and record it (with its hw
 * state) in pipe_config's icl_port_dplls.
 */
static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	struct icl_port_dpll *port_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	u32 clk_sel;

	clk_sel = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)) & DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
	id = DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy);

	if (WARN_ON(id > DPLL_ID_DG1_DPLL3))
		return;

	pll = intel_get_shared_dpll_by_id(dev_priv, id);
	port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];

	port_dpll->pll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &port_dpll->hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}

/*
 * Read back which DPLL drives @port on ICL+ (combo, Type-C MG, or TBT)
 * and record it in pipe_config's icl_port_dplls.
 */
static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	struct icl_port_dpll *port_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		u32 mask, shift;

		/* RKL uses a different field layout in DPCLKA_CFGCR0 */
		if (IS_ROCKETLAKE(dev_priv)) {
			mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
			shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		} else {
			mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
			shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		}

		temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & mask;
		id = temp >> shift;
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			/* anything else must be one of the TBT frequencies */
			drm_WARN_ON(&dev_priv->drm,
				    clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
		return;
	}

	pll = intel_get_shared_dpll_by_id(dev_priv, id);
	port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];

	port_dpll->pll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &port_dpll->hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}

/*
 * Read back which DPLL drives @port on CNL and record it as the shared
 * DPLL in pipe_config.
 */
static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	u32 temp;

	temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	pipe_config->shared_dpll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &pipe_config->dpll_hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);
}

/*
 * On BXT/GLK the port->PLL mapping is fixed; translate the port and record
 * the shared DPLL in pipe_config.
 */
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
			    enum port port,
			    struct intel_crtc_state *pipe_config)
{
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;

	switch (port) {
	case PORT_A:
		id = DPLL_ID_SKL_DPLL0;
		break;
	case PORT_B:
		id = DPLL_ID_SKL_DPLL1;
		break;
	case PORT_C:
		id = DPLL_ID_SKL_DPLL2;
		break;
	default:
		drm_err(&dev_priv->drm, "Incorrect port type\n");
		return;
	}

	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	pipe_config->shared_dpll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &pipe_config->dpll_hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);
}

/*
 * Read back which DPLL drives @port on SKL/KBL from DPLL_CTRL2 and record
 * it as the shared DPLL in pipe_config.
 */
static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	u32 temp;

	temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	id = temp >> (port * 3 + 1);

	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	pipe_config->shared_dpll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &pipe_config->dpll_hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);
}

/*
 * Read back which clock source drives @port on HSW/BDW from PORT_CLK_SEL
 * and record the corresponding shared DPLL in pipe_config.
 */
static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
	bool pll_active;

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		fallthrough;
	case PORT_CLK_SEL_NONE:
		return;
	}

	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	pipe_config->shared_dpll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &pipe_config->dpll_hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);
}

/*
 * Determine which CPU transcoder (including eDP/DSI panel transcoders)
 * feeds @crtc and whether it is enabled. Acquires the transcoder power
 * domain into @power_domain_set on success; returns the pipe enable state.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	u32 tmp;

	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
				       panel_transcoder_mask) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}

/*
 * Probe the BXT/GLK DSI transcoders to see whether one of them drives
 * @crtc. Acquires the transcoder power domain into @power_domain_set when
 * found; returns true iff pipe_config->cpu_transcoder ends up being DSI.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}

/*
 * Read back the DDI port state for @crtc's transcoder: determine the port,
 * look up its PLL per-platform, and detect a PCH/FDI encoder (HSW/BDW).
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
			PORT_A : PORT_B;
	} else {
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			return;
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	if (IS_DG1(dev_priv))
		dg1_get_ddi_pll(dev_priv, port, pipe_config);
	else if (INTEL_GEN(dev_priv) >= 11)
		icl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skl_get_ddi_pll(dev_priv, port, pipe_config);
	else
		hsw_get_ddi_pll(dev_priv, port, pipe_config);

	/*
	 * Haswell has only FDI/PCH transcoder A. It is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		/* FDI lane count is stored off-by-one in the register */
		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}

/*
 * Read back the full pipe configuration on HSW+ (DDI) hardware. All power
 * domains touched during the readout are collected in a local set and
 * released on exit; returns true iff the pipe is active.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		/* a pipe should not be both DDI- and DSI-driven */
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	intel_dsc_get_config(pipe_config);

	if (!active) {
		/* bigjoiner slave doesn't enable transcoder */
		if (!pipe_config->bigjoiner_slave)
			goto out;

		active = true;
		pipe_config->pixel_multiplier = 1;

		/* we cannot read out most state, so don't bother.. */
		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
		   INTEL_GEN(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_transcoder_timings(crtc, pipe_config);
	}

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	/* output colorspace lives in PIPECONF on HSW, in PIPEMISC on BDW+ */
	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (INTEL_GEN(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (INTEL_GEN(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->bigjoiner_slave) {
		/* Cannot be read out as a slave, set to 0. */
		pipe_config->pixel_multiplier = 0;
	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
		   !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}

/*
 * Read back the hw state for @crtc_state via the platform get_pipe_config
 * hook and derive the remaining software state. Returns false if the pipe
 * is not active.
 */
static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (!i915->display.get_pipe_config(crtc, crtc_state))
		return false;

	crtc_state->hw.active = true;

	intel_crtc_readout_derived_state(crtc_state);

	return true;
}

/* VESA 640x480x72Hz mode to set on the pipe */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

/*
 * Allocate and initialize an intel_framebuffer wrapping @obj.
 * Returns the embedded drm_framebuffer, or an ERR_PTR on failure
 * (the allocation is freed on init failure).
 */
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
			 struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return ERR_PTR(-ENOMEM);

	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
if (ret)
		goto err;

	return &intel_fb->base;

err:
	kfree(intel_fb);
	return ERR_PTR(ret);
}

/*
 * Detach and hide all planes currently assigned to @crtc in @state,
 * so a load-detect modeset starts from a clean (black) pipe.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}

/*
 * Light up a pipe with a fixed VESA mode so @connector can be probed for
 * a sink (analog load detection). On success the pre-modeset state is
 * saved in @old->restore_state for intel_release_load_detect_pipe().
 *
 * Returns true if the pipe was set up, false otherwise, or -EDEADLK if
 * the locking ctx needs to be backed off and retried.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/*
	 * Duplicate the current connector/crtc/plane state into
	 * restore_state so the original configuration can be committed
	 * back once load detection is done.
	 */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK means the caller must back off and retry the whole ctx */
	if (ret == -EDEADLK)
		return ret;

	return false;
}

/*
 * Undo intel_get_load_detect_pipe(): commit the saved pre-load-detect
 * state back to the hardware and drop the reference on it.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		drm_dbg_kms(&i915->drm,
			    "Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}

/* Reference clock, in kHz, feeding the i9xx-era DPLL. */
static int i9xx_pll_refclk(struct
drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev_priv))
		return 120000;
	else if (!IS_GEN(dev_priv, 2))
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the FP register bank currently selected by the DPLL */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode the M/N divisors; Pineview uses a different FP layout */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is stored one-hot in the DPLL; ffs() recovers the value */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2: i830 has no LVDS port at all */
		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
								 LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}

/*
 * Derive the pixel (dot) clock, in kHz, from the link clock and the
 * link M/N values. Returns 0 if link_n is 0 (link M/N not programmed).
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precison if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and for the link clock M/N the pixel clock is simpler:
	 * pixel_clock = (link_m * link_clock) / link_n
	 */

	if (!m_n->link_n)
		return 0;

	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}

/*
 * Read out port_clock/crtc_clock for a PCH (ILK-style) pipe from the
 * DPLL and FDI M/N configuration.
 */
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}

/* Returns the currently programmed mode of the given encoder.
 */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	struct intel_crtc *crtc;
	enum pipe pipe;

	/* Encoder must be enabled and tell us which pipe it drives */
	if (!encoder->get_hw_state(encoder, &pipe))
		return NULL;

	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(mode);
		return NULL;
	}

	if (!intel_crtc_get_pipe_config(crtc_state)) {
		kfree(crtc_state);
		kfree(mode);
		return NULL;
	}

	intel_encoder_get_config(encoder, crtc_state);

	/* Caller owns the returned mode and must kfree() it */
	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);

	kfree(crtc_state);

	return mode;
}

/**
 * intel_wm_need_update - Check whether watermarks need updating
 * @cur: current plane state
 * @new: new plane state
 *
 * Check current plane state versus the new one to determine whether
 * watermarks need to be recalculated.
 *
 * Returns true or false.
 */
static bool intel_wm_need_update(const struct intel_plane_state *cur,
				 struct intel_plane_state *new)
{
	/* Update watermarks on tiling or size changes. */
	if (new->uapi.visible != cur->uapi.visible)
		return true;

	if (!cur->hw.fb || !new->hw.fb)
		return false;

	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
	    cur->hw.rotation != new->hw.rotation ||
	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
		return true;

	return false;
}

/* Does the plane state require a scaler (src size != dst size)? */
static bool needs_scaling(const struct intel_plane_state *state)
{
	/* src is in 16.16 fixed point, dst in integer pixels */
	int src_w = drm_rect_width(&state->uapi.src) >> 16;
	int src_h = drm_rect_height(&state->uapi.src) >> 16;
	int dst_w = drm_rect_width(&state->uapi.dst);
	int dst_h = drm_rect_height(&state->uapi.dst);

	return (src_w != dst_w || src_h != dst_h);
}

/*
 * Compute the crtc_state flags (watermark updates, cxsr/lp_wm disables,
 * frontbuffer bits) implied by this plane's state transition.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(crtc_state, plane_state);
		visible = false;
	}

	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}

/* Can encoders @a and @b share a pipe (cloning)? */
static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

/*
 * Check that @encoder can be cloned with every other encoder being
 * driven by @crtc in this atomic state.
 */
static bool check_single_encoder_cloning(struct intel_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

/*
 * Pull the planar (NV12) Y planes linked to any UV plane in @state into
 * the state as well, so both halves of each link get reprogrammed
 * together.
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		if (!linked)
			continue;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		/* Links must be mutual and master/slave roles must differ */
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_linked_plane != plane);
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}

static int icl_check_nv12_planes(struct
intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking for planar YUV is only a gen11+ concept */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	if (!crtc_state->nv12_planes)
		return 0;

	/* For each NV12 (UV) plane, find a free Y-capable plane to link */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		memcpy(linked_state->color_plane, plane_state->color_plane,
		       sizeof(linked_state->color_plane));

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes select their chroma-upsampler partner via CUS_CTL */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}

/* Did the set of C8-format planes change between old and new state? */
static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}

/* Line time in 1/8 us units, clamped to the 9-bit register field (HSW). */
static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					pipe_mode->crtc_clock);

	return min(linetime_wm, 0x1ff);
}

/* IPS line time based on the cdclk instead of the pixel clock. */
static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
			       const struct intel_cdclk_state *cdclk_state)
{
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					cdclk_state->logical.cdclk);

	return min(linetime_wm, 0x1ff);
}

/* SKL+ line time, based on the (downscaling-adjusted) pixel rate. */
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
				   crtc_state->pixel_rate);

	/* Display WA #1135: BXT:ALL GLK:ALL */
	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
		linetime_wm /= 2;

	return min(linetime_wm, 0x1ff);
}

/* Compute linetime (and, where supported, IPS linetime) watermarks. */
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_cdclk_state *cdclk_state;

	if (INTEL_GEN(dev_priv) >= 9)
		crtc_state->linetime = skl_linetime_wm(crtc_state);
	else
		crtc_state->linetime = hsw_linetime_wm(crtc_state);

	if (!hsw_crtc_supports_ips(crtc))
		return 0;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	crtc_state->ips_linetime =
hsw_ips_linetime_wm(crtc_state,
						    cdclk_state);

	return 0;
}

/*
 * Per-crtc atomic check: clocks, color management, watermarks, scalers,
 * IPS and PSR2 selective fetch for @crtc in @state.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	int ret;

	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !crtc_state->bigjoiner_slave &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		if (drm_WARN_ON(&dev_priv->drm,
				!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	if (!mode_changed) {
		ret = intel_psr2_sel_fetch_update(state, crtc);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Sync each connector's atomic state (best_encoder/crtc and its
 * reference) with the legacy encoder pointers, e.g. after hw readout.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previously bound crtc */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

/*
 * Clamp the pipe bpp to what the sink can accept, based on the
 * connector's max_bpc property.
 */
static int
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
		      struct intel_crtc_state *pipe_config)
{
	struct drm_connector *connector = conn_state->connector;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	const struct drm_display_info *info = &connector->display_info;
	int bpp;

	/* Round max_bpc down to a supported pipe depth (6/8/10/12 bpc) */
	switch (conn_state->max_bpc) {
	case 6 ... 7:
		bpp = 6 * 3;
		break;
	case 8 ... 9:
		bpp = 8 * 3;
		break;
	case 10 ... 11:
		bpp = 10 * 3;
		break;
	case 12 ... 16:
		bpp = 12 * 3;
		break;
	default:
		MISSING_CASE(conn_state->max_bpc);
		return -EINVAL;
	}

	if (bpp < pipe_config->pipe_bpp) {
		drm_dbg_kms(&i915->drm,
			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
			    connector->base.id, connector->name,
			    bpp, 3 * info->bpc,
			    3 * conn_state->max_requested_bpc,
			    pipe_config->pipe_bpp);

		pipe_config->pipe_bpp = bpp;
	}

	return 0;
}

/*
 * Pick the platform's maximum pipe bpp, then clamp it to every
 * connector driven by @crtc in this state.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)))
		bpp = 10*3;
	else if (INTEL_GEN(dev_priv) >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;

	pipe_config->pipe_bpp = bpp;

	/* Clamp display bpp to connector max bpp */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		int ret;

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
		if (ret)
			return ret;
	}

	return 0;
}

/* Dump the crtc_* timing fields of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(struct drm_i915_private
*i915, 10121 const struct drm_display_mode *mode) 10122 { 10123 drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, " 10124 "type: 0x%x flags: 0x%x\n", 10125 mode->crtc_clock, 10126 mode->crtc_hdisplay, mode->crtc_hsync_start, 10127 mode->crtc_hsync_end, mode->crtc_htotal, 10128 mode->crtc_vdisplay, mode->crtc_vsync_start, 10129 mode->crtc_vsync_end, mode->crtc_vtotal, 10130 mode->type, mode->flags); 10131 } 10132 10133 static void 10134 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, 10135 const char *id, unsigned int lane_count, 10136 const struct intel_link_m_n *m_n) 10137 { 10138 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); 10139 10140 drm_dbg_kms(&i915->drm, 10141 "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 10142 id, lane_count, 10143 m_n->gmch_m, m_n->gmch_n, 10144 m_n->link_m, m_n->link_n, m_n->tu); 10145 } 10146 10147 static void 10148 intel_dump_infoframe(struct drm_i915_private *dev_priv, 10149 const union hdmi_infoframe *frame) 10150 { 10151 if (!drm_debug_enabled(DRM_UT_KMS)) 10152 return; 10153 10154 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame); 10155 } 10156 10157 static void 10158 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv, 10159 const struct drm_dp_vsc_sdp *vsc) 10160 { 10161 if (!drm_debug_enabled(DRM_UT_KMS)) 10162 return; 10163 10164 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc); 10165 } 10166 10167 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x 10168 10169 static const char * const output_type_str[] = { 10170 OUTPUT_TYPE(UNUSED), 10171 OUTPUT_TYPE(ANALOG), 10172 OUTPUT_TYPE(DVO), 10173 OUTPUT_TYPE(SDVO), 10174 OUTPUT_TYPE(LVDS), 10175 OUTPUT_TYPE(TVOUT), 10176 OUTPUT_TYPE(HDMI), 10177 OUTPUT_TYPE(DP), 10178 OUTPUT_TYPE(EDP), 10179 OUTPUT_TYPE(DSI), 10180 OUTPUT_TYPE(DDI), 10181 OUTPUT_TYPE(DP_MST), 10182 }; 10183 10184 #undef OUTPUT_TYPE 10185 10186 static void snprintf_output_types(char *buf, size_t len, 10187 unsigned 
				  int output_types)
{
	char *str = buf;
	int i;

	str[0] = '\0';

	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		int r;

		if ((output_types & BIT(i)) == 0)
			continue;

		/* Separator only between entries, not before the first one. */
		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);
		if (r >= len)
			break;
		str += r;
		len -= r;

		output_types &= ~BIT(i);
	}

	/* Any bit still set has no name in output_type_str. */
	WARN_ON_ONCE(output_types != 0);
}

static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};

/* Return the printable name of @format, or "invalid" when out of range. */
static const char *output_formats(enum intel_output_format format)
{
	if (format >= ARRAY_SIZE(output_format_str))
		return "invalid";
	return output_format_str[format];
}

/* Dump one plane's fb, pixel format, rotation and scaler assignment. */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height,
		    drm_get_format_name(fb->format->format, &format_name),
		    fb->modifier, yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: "
			    DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}

/*
 * intel_dump_pipe_config - dump a crtc state (and, when @state is
 * provided, the state of every plane on its pipe) to the KMS debug log.
 * @context: free-form tag identifying the caller/phase of the dump.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* A disabled crtc has no interesting state beyond its planes. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
		    pipe_config->bigjoiner_slave ? "slave" :
		    pipe_config->bigjoiner ? "master" : "no");

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				      pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE(review): the gamut metadata packet also dumps
	 * infoframes.drm - presumably the DRM (HDR) infoframe is carried
	 * as a gamut metadata packet on some platforms; confirm against
	 * intel_hdmi before "fixing" this apparent duplication.
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
		    yesno(pipe_config->vrr.enable),
		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
		    pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
		    intel_vrr_vmin_vblank_start(pipe_config),
		    intel_vrr_vmax_vblank_start(pipe_config));

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	if (INTEL_GEN(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
		    pipe_config->hw.degamma_lut ?
		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
		    pipe_config->hw.gamma_lut ?
		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}

/*
 * check_digital_port_conflicts - reject configurations that would drive
 * the same digital port from more than one encoder, or that mix MST and
 * SST/HDMI on one port.  Returns true when the state is acceptable.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state; fall back to the current one. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
				break;
			fallthrough;
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}

/*
 * Copy the color blobs into the hw state for a non-modeset update.  A
 * bigjoiner slave copies from its master's new state instead; when the
 * master state is absent there is nothing to update.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
					   struct intel_crtc_state *crtc_state)
{
	const struct intel_crtc_state *from_crtc_state = crtc_state;

	if (crtc_state->bigjoiner_slave) {
		from_crtc_state = intel_atomic_get_new_crtc_state(state,
								  crtc_state->bigjoiner_linked_crtc);

		/* No need to copy state if the master state is unchanged */
		if (!from_crtc_state)
			return;
	}

	intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
}

/* Seed the hw crtc state from the uapi state at the start of a modeset. */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
}

/*
 * Propagate the hw state back into the uapi state so userspace observes
 * what was actually programmed.  Bigjoiner slaves are skipped: their
 * uapi state is owned by the master pipe.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_slave)
		return;

	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}

/*
 * copy_bigjoiner_crtc_state - turn the slave pipe's state into a copy of
 * the master's (@from_crtc_state) while preserving the slave's own uapi,
 * scaler, dpll and crc state, then re-derive the slave-specific fields.
 *
 * Returns 0 on success, -ENOMEM when the temporary copy can't be
 * allocated.
 */
static int
copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
			  const struct intel_crtc_state *from_crtc_state)
{
	struct intel_crtc_state *saved_state;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* Keep the slave's own identity in the duplicated master state. */
	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->crc_enabled = crtc_state->crc_enabled;

	intel_crtc_free_hw_state(crtc_state);
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
	crtc_state->hw.enable = from_crtc_state->hw.enable;
	crtc_state->hw.active = from_crtc_state->hw.active;
	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;

	/* Some fixups */
	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
	crtc_state->bigjoiner_slave = true;
	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
	crtc_state->has_audio = false;

	return 0;
}

/*
 * intel_crtc_prepare_cleared_state - reset the crtc state to a clean
 * slate before config computation, preserving only the fields copied
 * below (uapi, scaler, dpll, crc, and wm on GMCH-era platforms).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);

	return 0;
}

/*
 * intel_modeset_pipe_config - compute the full pipe configuration for a
 * modeset: baseline bpp, encoder hooks, crtc fixup and dithering.
 *
 * Returns 0 on success or a negative error code (-EDEADLK is passed
 * through untouched for atomic lock retries).
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions.
	 * Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	if (ret == I915_DISPLAY_CONFIG_RETRY) {
		/* Only a single retry is allowed to avoid looping forever. */
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}

/*
 * Run the encoders' optional .compute_config_late() hooks for every
 * connector feeding this crtc.  Returns the first non-zero hook result.
 */
static int
intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
{
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(&state->base, connector,
					conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);
		int ret;

		if (conn_state->crtc != &crtc->base ||
		    !encoder->compute_config_late)
			continue;

		ret = encoder->compute_config_late(encoder, crtc_state,
						   conn_state);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Fuzzily compare two clocks: true when the difference is below roughly
 * 5% of the average of the two (exact match and non-zero required first).
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
		return true;

	return false;
}

/*
 * Compare two M/N ratios.  When !exact, the pair with the smaller N is
 * scaled up by powers of two until the Ns align, then the Ms are fuzzily
 * compared - accepting values that encode the same ratio differently.
 */
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	if (m == m2 && n == n2)
		return true;

	if (exact || !m || !n || !m2 || !n2)
		return false;

	/* The shifts below must not be able to overflow into the sign bit. */
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}
	if (n != n2)
		return false;

	return intel_fuzzy_clock_check(m, m2);
}

/* Compare two link M/N sets: TU plus both the data and link ratios. */
static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
		       const struct intel_link_m_n *m2_n2,
		       bool exact)
{
	return m_n->tu == m2_n2->tu &&
		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
		intel_compare_m_n(m_n->link_m, m_n->link_n,
				  m2_n2->link_m, m2_n2->link_n, exact);
}

/* Bytewise equality of two HDMI infoframes. */
static bool
intel_compare_infoframe(const union hdmi_infoframe *a,
			const union hdmi_infoframe *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

/* Bytewise equality of two DP VSC SDPs. */
static bool
intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
			 const struct drm_dp_vsc_sdp *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

/*
 * Report an infoframe mismatch: debug level for a fastset check (may be
 * resolved without a modeset), error level for a verification failure.
 */
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		drm_dbg_kms(&dev_priv->drm,
			    "fastset mismatch in %s infoframe\n", name);
		drm_dbg_kms(&dev_priv->drm, "expected:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		drm_dbg_kms(&dev_priv->drm, "found:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
		drm_err(&dev_priv->drm, "expected:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		drm_err(&dev_priv->drm, "found:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}

/* Same as pipe_config_infoframe_mismatch() but for DP VSC SDPs. */
static void
pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
				bool fastset, const char *name,
				const struct drm_dp_vsc_sdp *a,
				const struct drm_dp_vsc_sdp *b)
{
	if (fastset) {
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		drm_dbg_kms(&dev_priv->drm,
			    "fastset mismatch in %s dp sdp\n", name);
		drm_dbg_kms(&dev_priv->drm, "expected:\n");
		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
		drm_dbg_kms(&dev_priv->drm, "found:\n");
		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
		drm_err(&dev_priv->drm, "expected:\n");
		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
		drm_err(&dev_priv->drm, "found:\n");
		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}

/*
 * Printf-style mismatch reporter for scalar pipe config fields:
 * debug level when checking a fastset, error level otherwise.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}

/* Fastboot policy: module parameter wins, else default on for gen9+/VLV/CHV. */
static bool fastboot_enabled(struct drm_i915_private *dev_priv)
{
	if (dev_priv->params.fastboot != -1)
		return dev_priv->params.fastboot;

	/* Enable fastboot by default on Skylake and newer */
	if (INTEL_GEN(dev_priv) >= 9)
		return true;

	/* Enable fastboot by default on VLV and CHV */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return true;

	/* Disabled by default on all others */
	return false;
}

/*
 * intel_pipe_config_compare - field-by-field comparison of the expected
 * (sw) and found (hw) crtc states; returns true when they match.
 */
static bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state
			  *pipe_config,
			  bool fastset)
{
	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	bool ret = true;
	u32 bp_gamma = 0;
	bool fixup_inherited = fastset &&
		current_config->inherited && !pipe_config->inherited;

	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm,
			    "initial modeset and fastboot not set\n");
		ret = false;
	}

/* Compare one field as hex; sets ret = false and logs on mismatch. */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare one field as a signed integer. */
#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare one boolean field. */
#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/* Compare one pointer field. */
#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare one intel_link_m_n field (fuzzily unless !fastset). */
#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, !fastset) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "or tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     current_config->alt_name.tu, \
				     current_config->alt_name.gmch_m, \
				     current_config->alt_name.gmch_n, \
				     current_config->alt_name.link_m, \
				     current_config->alt_name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* Compare only the bits of one field selected by @mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

/* Compare one clock field with ~5% tolerance. */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare one HDMI infoframe bytewise. */
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

/* Compare one DP VSC SDP bytewise; skipped entirely when PSR is in use. */
#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
	if (!current_config->has_psr && !pipe_config->has_psr && \
	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
				      &pipe_config->infoframes.name)) { \
		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
						&current_config->infoframes.name, \
						&pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

/* Compare a LUT: first the mode field (@name1), then the blob contents. */
#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
	if (current_config->name1 != pipe_config->name1) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
				     "(expected %i, found %i, won't compare lut values)", \
				     current_config->name1, \
				     pipe_config->name1); \
		ret = false;\
	} else { \
		if (!intel_color_lut_equal(current_config->name2, \
					   pipe_config->name2, pipe_config->name1, \
					   bit_precision)) { \
			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
					     "hw_state doesn't match sw_state"); \
			ret = false; \
		} \
	} \
} while (0)

/* True when either state carries the given quirk. */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	/* FIXME do the readout properly and get rid of this quirk */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);

		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);

		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);

		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);

		PIPE_CONF_CHECK_I(pixel_multiplier);

		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_INTERLACE);

		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
					      DRM_MODE_FLAG_PHSYNC);
			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
					      DRM_MODE_FLAG_NHSYNC);
			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
					      DRM_MODE_FLAG_PVSYNC);
			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
					      DRM_MODE_FLAG_NVSYNC);
		}
	}
PIPE_CONF_CHECK_I(output_format); 11210 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 11211 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 11212 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 11213 PIPE_CONF_CHECK_BOOL(limited_color_range); 11214 11215 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 11216 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 11217 PIPE_CONF_CHECK_BOOL(has_infoframe); 11218 /* FIXME do the readout properly and get rid of this quirk */ 11219 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) 11220 PIPE_CONF_CHECK_BOOL(fec_enable); 11221 11222 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); 11223 11224 PIPE_CONF_CHECK_X(gmch_pfit.control); 11225 /* pfit ratios are autocomputed by the hw on gen4+ */ 11226 if (INTEL_GEN(dev_priv) < 4) 11227 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 11228 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 11229 11230 /* 11231 * Changing the EDP transcoder input mux 11232 * (A_ONOFF vs. A_ON) requires a full modeset. 11233 */ 11234 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 11235 11236 if (!fastset) { 11237 PIPE_CONF_CHECK_I(pipe_src_w); 11238 PIPE_CONF_CHECK_I(pipe_src_h); 11239 11240 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 11241 if (current_config->pch_pfit.enabled) { 11242 PIPE_CONF_CHECK_I(pch_pfit.dst.x1); 11243 PIPE_CONF_CHECK_I(pch_pfit.dst.y1); 11244 PIPE_CONF_CHECK_I(pch_pfit.dst.x2); 11245 PIPE_CONF_CHECK_I(pch_pfit.dst.y2); 11246 } 11247 11248 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 11249 /* FIXME do the readout properly and get rid of this quirk */ 11250 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) 11251 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 11252 11253 PIPE_CONF_CHECK_X(gamma_mode); 11254 if (IS_CHERRYVIEW(dev_priv)) 11255 PIPE_CONF_CHECK_X(cgm_mode); 11256 else 11257 PIPE_CONF_CHECK_X(csc_mode); 11258 PIPE_CONF_CHECK_BOOL(gamma_enable); 11259 PIPE_CONF_CHECK_BOOL(csc_enable); 11260 11261 PIPE_CONF_CHECK_I(linetime); 11262 PIPE_CONF_CHECK_I(ips_linetime); 11263 11264 bp_gamma = 
intel_color_get_gamma_bit_precision(pipe_config); 11265 if (bp_gamma) 11266 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma); 11267 } 11268 11269 PIPE_CONF_CHECK_BOOL(double_wide); 11270 11271 PIPE_CONF_CHECK_P(shared_dpll); 11272 11273 /* FIXME do the readout properly and get rid of this quirk */ 11274 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { 11275 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 11276 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 11277 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 11278 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 11279 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 11280 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 11281 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 11282 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 11283 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 11284 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 11285 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 11286 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 11287 PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 11288 PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 11289 PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 11290 PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 11291 PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 11292 PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 11293 PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 11294 PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 11295 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 11296 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); 11297 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); 11298 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); 11299 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); 11300 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); 11301 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); 11302 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); 11303 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); 11304 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); 11305 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 11306 11307 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 11308 PIPE_CONF_CHECK_X(dsi_pll.div); 11309 11310 if 
(IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) 11311 PIPE_CONF_CHECK_I(pipe_bpp); 11312 11313 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); 11314 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); 11315 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 11316 11317 PIPE_CONF_CHECK_I(min_voltage_level); 11318 } 11319 11320 PIPE_CONF_CHECK_X(infoframes.enable); 11321 PIPE_CONF_CHECK_X(infoframes.gcp); 11322 PIPE_CONF_CHECK_INFOFRAME(avi); 11323 PIPE_CONF_CHECK_INFOFRAME(spd); 11324 PIPE_CONF_CHECK_INFOFRAME(hdmi); 11325 PIPE_CONF_CHECK_INFOFRAME(drm); 11326 PIPE_CONF_CHECK_DP_VSC_SDP(vsc); 11327 11328 PIPE_CONF_CHECK_X(sync_mode_slaves_mask); 11329 PIPE_CONF_CHECK_I(master_transcoder); 11330 PIPE_CONF_CHECK_BOOL(bigjoiner); 11331 PIPE_CONF_CHECK_BOOL(bigjoiner_slave); 11332 PIPE_CONF_CHECK_P(bigjoiner_linked_crtc); 11333 11334 PIPE_CONF_CHECK_I(dsc.compression_enable); 11335 PIPE_CONF_CHECK_I(dsc.dsc_split); 11336 PIPE_CONF_CHECK_I(dsc.compressed_bpp); 11337 11338 PIPE_CONF_CHECK_I(mst_master_transcoder); 11339 11340 PIPE_CONF_CHECK_BOOL(vrr.enable); 11341 PIPE_CONF_CHECK_I(vrr.vmin); 11342 PIPE_CONF_CHECK_I(vrr.vmax); 11343 PIPE_CONF_CHECK_I(vrr.flipline); 11344 PIPE_CONF_CHECK_I(vrr.pipeline_full); 11345 11346 #undef PIPE_CONF_CHECK_X 11347 #undef PIPE_CONF_CHECK_I 11348 #undef PIPE_CONF_CHECK_BOOL 11349 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 11350 #undef PIPE_CONF_CHECK_P 11351 #undef PIPE_CONF_CHECK_FLAGS 11352 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 11353 #undef PIPE_CONF_CHECK_COLOR_LUT 11354 #undef PIPE_CONF_QUIRK 11355 11356 return ret; 11357 } 11358 11359 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 11360 const struct intel_crtc_state *pipe_config) 11361 { 11362 if (pipe_config->has_pch_encoder) { 11363 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 11364 &pipe_config->fdi_m_n); 11365 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock; 11366 11367 /* 11368 * FDI already provided 
one idea for the dotclock.
		 * Yell if the encoder disagrees.
		 */
		drm_WARN(&dev_priv->drm,
			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
			 fdi_dotclock, dotclock);
	}
}

/*
 * Cross-check the software watermark/DDB state for @crtc against what the
 * hardware actually has programmed. Gen9+ only; purely diagnostic — every
 * disagreement is reported via drm_err(), nothing is corrected here.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* Scratch buffer for the hardware readout; heap-allocated (kzalloc
	 * below) because the DDB/WM arrays are too large for the stack. */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	struct skl_pipe_wm *sw_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	u8 hw_enabled_slices;
	const enum pipe pipe = crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	/* Only gen9+ has this style of wm/DDB state; inactive pipes have
	 * nothing meaningful to compare. */
	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
	sw_wm = &new_crtc_state->wm.skl.optimal;

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->dbuf.enabled_slices,
			hw_enabled_slices);

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks: a level also passes if it matches the SAGV
		 * wm0 alternative (level 0 only). */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]) ||
			    (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
							       &sw_plane_wm->sagv_wm0)))
				continue;

			drm_err(&dev_priv->drm,
				"mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), plane + 1, level,
				sw_plane_wm->wm[level].plane_en,
				sw_plane_wm->wm[level].plane_res_b,
				sw_plane_wm->wm[level].plane_res_l,
				hw_plane_wm->wm[level].plane_en,
				hw_plane_wm->wm[level].plane_res_b,
				hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			drm_err(&dev_priv->drm,
				"mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), plane + 1,
				sw_plane_wm->trans_wm.plane_en,
				sw_plane_wm->trans_wm.plane_res_b,
				sw_plane_wm->trans_wm.plane_res_l,
				hw_plane_wm->trans_wm.plane_en,
				hw_plane_wm->trans_wm.plane_res_b,
				hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[plane];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				pipe_name(pipe), plane + 1,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated its ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check
	 */
	/* NOTE(review): degenerate condition kept verbatim — the block just
	 * mirrors the per-plane checks above for the cursor plane. */
	if (1) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]) ||
			    (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
							       &sw_plane_wm->sagv_wm0)))
				continue;

			drm_err(&dev_priv->drm,
				"mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), level,
				sw_plane_wm->wm[level].plane_en,
				sw_plane_wm->wm[level].plane_res_b,
				sw_plane_wm->wm[level].plane_res_l,
				hw_plane_wm->wm[level].plane_en,
				hw_plane_wm->wm[level].plane_res_b,
				hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			drm_err(&dev_priv->drm,
				"mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe),
				sw_plane_wm->trans_wm.plane_en,
				sw_plane_wm->trans_wm.plane_res_b,
				sw_plane_wm->trans_wm.plane_res_l,
				hw_plane_wm->trans_wm.plane_en,
				hw_plane_wm->trans_wm.plane_res_b,
				hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				pipe_name(pipe),
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}

/*
 * Verify the connector states in @state that reference @crtc: the connector
 * state is checked via intel_connector_verify_state() and the atomic
 * best_encoder must agree with the legacy connector->encoder pointer.
 *
 * NOTE(review): also called with @crtc == NULL (see
 * intel_modeset_verify_disabled()); the "new_conn_state->crtc != &crtc->base"
 * test then selects connectors bound to no crtc — relies on &crtc->base
 * evaluating to NULL when crtc is NULL; confirm base is the first member.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct intel_crtc_state *crtc_state = NULL;

		if (new_conn_state->crtc != &crtc->base)
			continue;

		if (crtc)
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
				"connector's atomic encoder doesn't match legacy encoder\n");
	}
}

/*
 * For every encoder, cross-check its software enabled state (derived from
 * the connector states in @state) against encoder->base.crtc, and make sure
 * a detached encoder is actually disabled in hardware.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/* found: some connector (old or new) references this encoder;
		 * enabled: a *new* connector state references it. */
		bool enabled = false, found = false;
		enum pipe pipe;

		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
			    encoder->base.base.id,
			    encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
					"connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoders untouched by this commit are not checked. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
				"encoder's enabled state mismatch "
				"(expected %i, found %i)\n",
				!!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
					"encoder detached but still enabled on pipe %c.\n",
					pipe_name(pipe));
		}
	}
}

/*
 * Read the current hardware state of @crtc back into @old_crtc_state
 * (recycled as scratch storage after being reset) and compare it against
 * the software state in @new_crtc_state. Mismatches trigger
 * I915_STATE_WARN and a dump of both configs.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	struct intel_crtc *master = crtc;

	/* Repurpose the old state as the hw-readout buffer, preserving
	 * only its backpointer to the atomic state. */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	intel_crtc_get_pipe_config(pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* A bigjoiner slave's encoders hang off the master crtc. */
	if (new_crtc_state->bigjoiner_slave)
		master = new_crtc_state->bigjoiner_linked_crtc;

	for_each_encoder_on_crtc(dev, &master->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && master->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active)
			intel_encoder_get_config(encoder, pipe_config);
	}

	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	/* Full (non-fastset) comparison of sw state vs. hw readout. */
	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}

/*
 * Assert that every plane in @state is in the state (enabled/disabled) its
 * software state says it should be. Planar-format slave planes are exempt
 * from the visibility requirement.
 */
static void
intel_verify_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane,
					  plane_state, i)
		assert_plane(plane, plane_state->planar_slave ||
			     plane_state->uapi.visible);
}

/*
 * Compare one shared DPLL's software tracking (on/active_mask/hw_state)
 * against the hardware. With @crtc == NULL only the global bookkeeping is
 * checked; otherwise @crtc's membership in the pll masks is verified too.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs have no meaningful on/off tracking to verify. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* Only compare register contents while the pll is on. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}

/*
 * Verify the new DPLL for @crtc, and if the crtc switched PLLs during this
 * commit, check that it has been fully dropped from the old PLL's masks.
 */
static void
verify_shared_dpll_state(struct intel_crtc *crtc,
			 struct intel_crtc_state *old_crtc_state,
			 struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask)\n",
				pipe_name(crtc->pipe));
		/* NOTE(review): "%x" is fed pipe_name() (a character value),
		 * so this prints the char code rather than the pipe letter —
		 * probably wants %c; left as-is since it's a log string. */
		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
				"pll enabled crtcs mismatch (found %x in enabled mask)\n",
				pipe_name(crtc->pipe));
	}
}

/*
 * Run the full set of post-commit hardware/software state checks for one
 * crtc. Skipped entirely unless the crtc did a modeset or a fastset
 * (update_pipe).
 */
static void
intel_modeset_verify_crtc(struct intel_crtc *crtc,
			  struct intel_atomic_state *state,
			  struct intel_crtc_state *old_crtc_state,
			  struct intel_crtc_state *new_crtc_state)
{
	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
		return;

	verify_wm_state(crtc, new_crtc_state);
	verify_connector_state(state, crtc);
	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
}

/* Check the global bookkeeping of every shared DPLL (no crtc context). */
static void
verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
	int i;

	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
		verify_single_dpll_state(dev_priv,
					 &dev_priv->dpll.shared_dplls[i],
					 NULL, NULL);
}

/* State checks that are not tied to any particular enabled crtc. */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}

/*
 * Recompute vblank timestamping constants and the scanline counter offset
 * for @crtc_state. Works on a local copy of the adjusted mode so the VRR
 * adjustments below don't leak into the crtc state.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode adjusted_mode =
		crtc_state->hw.adjusted_mode;

	/* With VRR the effective vertical timings stretch out to vmax. */
	if (crtc_state->vrr.enable) {
		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
	}

	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode.crtc_vtotal;
		/* Interlaced modes count in field lines, i.e. half vtotal. */
		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}

/*
 * Drop the shared-DPLL references of every crtc undergoing a full modeset
 * in @state. No-op on platforms without a crtc_compute_clock hook (i.e.
 * without driver-managed shared DPLLs).
 */
static void intel_modeset_clear_plls(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		intel_release_shared_dplls(state, crtc);
	}
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/* Note: this pulls every crtc's state into @state. */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

/*
 * Fold the crtc states in @state into @active_pipes: set the bit of every
 * newly-active pipe, clear the bit of every newly-inactive one, and return
 * the updated mask. Pipes not in @state are left untouched.
 */
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
			   u8 active_pipes)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);
		else
			active_pipes &= ~BIT(crtc->pipe);
	}

	return active_pipes;
}

/*
 * Global checks that run once we know @state performs a modeset.
 * Returns 0 or a negative error code.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	state->modeset = true;

	if (IS_HASWELL(dev_priv))
		return hsw_mode_set_planes_workaround(state);

	return 0;
}

/*
 * Handle calculation of various watermark data at the end of the atomic check
 * phase.  The code here should be run after the per-crtc and per-plane 'check'
 * handlers to ensure that all derived state has been updated.
 */
static int calc_watermark_data(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Is there platform-specific watermark information to calculate? */
	if (dev_priv->display.compute_global_watermarks)
		return dev_priv->display.compute_global_watermarks(state);

	return 0;
}

/*
 * Downgrade a computed full modeset to a fastset when the old and new
 * states compare equal under the fastset (fuzzy) rules.
 */
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
		return;

	new_crtc_state->uapi.mode_changed = false;
	new_crtc_state->update_pipe = true;
}

static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
12004 * 12005 * FIXME: should really copy more fuzzy state here 12006 */ 12007 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n; 12008 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n; 12009 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2; 12010 new_crtc_state->has_drrs = old_crtc_state->has_drrs; 12011 } 12012 12013 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, 12014 struct intel_crtc *crtc, 12015 u8 plane_ids_mask) 12016 { 12017 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 12018 struct intel_plane *plane; 12019 12020 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 12021 struct intel_plane_state *plane_state; 12022 12023 if ((plane_ids_mask & BIT(plane->id)) == 0) 12024 continue; 12025 12026 plane_state = intel_atomic_get_plane_state(state, plane); 12027 if (IS_ERR(plane_state)) 12028 return PTR_ERR(plane_state); 12029 } 12030 12031 return 0; 12032 } 12033 12034 int intel_atomic_add_affected_planes(struct intel_atomic_state *state, 12035 struct intel_crtc *crtc) 12036 { 12037 const struct intel_crtc_state *old_crtc_state = 12038 intel_atomic_get_old_crtc_state(state, crtc); 12039 const struct intel_crtc_state *new_crtc_state = 12040 intel_atomic_get_new_crtc_state(state, crtc); 12041 12042 return intel_crtc_add_planes_to_state(state, crtc, 12043 old_crtc_state->enabled_planes | 12044 new_crtc_state->enabled_planes); 12045 } 12046 12047 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) 12048 { 12049 /* See {hsw,vlv,ivb}_plane_ratio() */ 12050 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || 12051 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || 12052 IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11); 12053 } 12054 12055 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state, 12056 struct intel_crtc *crtc, 12057 struct intel_crtc *other) 12058 { 12059 const struct intel_plane_state *plane_state; 12060 struct intel_plane *plane; 12061 u8 
plane_ids = 0; 12062 int i; 12063 12064 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12065 if (plane->pipe == crtc->pipe) 12066 plane_ids |= BIT(plane->id); 12067 } 12068 12069 return intel_crtc_add_planes_to_state(state, other, plane_ids); 12070 } 12071 12072 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state) 12073 { 12074 const struct intel_crtc_state *crtc_state; 12075 struct intel_crtc *crtc; 12076 int i; 12077 12078 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 12079 int ret; 12080 12081 if (!crtc_state->bigjoiner) 12082 continue; 12083 12084 ret = intel_crtc_add_bigjoiner_planes(state, crtc, 12085 crtc_state->bigjoiner_linked_crtc); 12086 if (ret) 12087 return ret; 12088 } 12089 12090 return 0; 12091 } 12092 12093 static int intel_atomic_check_planes(struct intel_atomic_state *state) 12094 { 12095 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 12096 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 12097 struct intel_plane_state *plane_state; 12098 struct intel_plane *plane; 12099 struct intel_crtc *crtc; 12100 int i, ret; 12101 12102 ret = icl_add_linked_planes(state); 12103 if (ret) 12104 return ret; 12105 12106 ret = intel_bigjoiner_add_affected_planes(state); 12107 if (ret) 12108 return ret; 12109 12110 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12111 ret = intel_plane_atomic_check(state, plane); 12112 if (ret) { 12113 drm_dbg_atomic(&dev_priv->drm, 12114 "[PLANE:%d:%s] atomic driver check failed\n", 12115 plane->base.base.id, plane->base.name); 12116 return ret; 12117 } 12118 } 12119 12120 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 12121 new_crtc_state, i) { 12122 u8 old_active_planes, new_active_planes; 12123 12124 ret = icl_check_nv12_planes(new_crtc_state); 12125 if (ret) 12126 return ret; 12127 12128 /* 12129 * On some platforms the number of active planes affects 12130 * the planes' minimum cdclk calculation. 
		 * Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* the cursor plane does not count towards the plane ratios */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/*
		 * Not only the number of planes, but if the plane configuration had
		 * changed might already mean we need to recompute min CDCLK,
		 * because different planes might consume different amount of Dbuf bandwidth
		 * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
		 */
		if (old_active_planes == new_active_planes)
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Decide whether a cdclk recomputation is needed, based on per-plane minimum
 * cdclk, a forced minimum cdclk change, and the bandwidth state's minimum.
 * Sets *@need_cdclk_calc; never clears it.
 */
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	const struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;
	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	if (new_cdclk_state &&
	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
		*need_cdclk_calc = true;

	ret = dev_priv->display.bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* nothing more to compare if neither state is part of this commit */
	if (!new_cdclk_state || !new_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}

/* Run the crtc-level atomic checks for every crtc in @state. */
static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
		int ret;

		ret = intel_crtc_atomic_check(state, crtc);
		if (ret) {
			drm_dbg_atomic(&i915->drm,
				       "[CRTC:%d:%s] atomic driver check failed\n",
				       crtc->base.base.id, crtc->base.name);
			return ret;
		}
	}

	return 0;
}

/*
 * Return true if any enabled crtc in @state using one of the cpu
 * @transcoders needs a full modeset.
 */
static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
					       u8 transcoders)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}

/*
 * Validate the bigjoiner configuration of @crtc and, when it acts as master,
 * claim the next pipe as its slave. Returns -EINVAL on a claim conflict or
 * when no next pipe exists.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
	struct intel_crtc *slave, *master;

	/* slave being enabled, is the master still claiming this crtc? */
	if (old_crtc_state->bigjoiner_slave) {
		slave = crtc;
		master = old_crtc_state->bigjoiner_linked_crtc;
		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
			goto claimed;
	}

	if (!new_crtc_state->bigjoiner)
		return 0;

	/* master needs pipe + 1; reject if this is already the last pipe */
	if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
			      "CRTC + 1 to be used, doesn't exist\n",
			      crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	slave = new_crtc_state->bigjoiner_linked_crtc =
		intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
	master = crtc;
	if (IS_ERR(slave_crtc_state))
		return PTR_ERR(slave_crtc_state);

	/* master being enabled, slave was already configured? */
	if (slave_crtc_state->uapi.enable)
		goto claimed;

	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
		      slave->base.base.id, slave->base.name);

	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);

claimed:
	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
		      slave->base.base.id, slave->base.name,
		      master->base.base.id, master->base.name);
	return -EINVAL;
}

/* Sever the bigjoiner link between @master_crtc_state and its slave. */
static void kill_bigjoiner_slave(struct intel_atomic_state *state,
				 struct intel_crtc_state *master_crtc_state)
{
	struct intel_crtc_state *slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);

	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
}

/**
 * DOC: asynchronous flip implementation
 *
 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
 * Correspondingly, support is currently added for primary plane only.
 *
 * Async flip can only change the plane surface address, so anything else
 * changing is rejected from the intel_atomic_check_async() function.
 * Once this check is cleared, flip done interrupt is enabled using
 * the intel_crtc_enable_flip_done() function.
 *
 * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to the userspace in the interrupt
 * handler itself.
 * The timestamp and sequence sent during the flip done event
 * correspond to the last vblank and have no relation to the actual time when
 * the flip done event was sent.
 */
static int intel_atomic_check_async(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	int i;

	/* async flip may only change the surface address: reject anything else */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state)) {
			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
			return -EINVAL;
		}

		if (!new_crtc_state->hw.active) {
			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
			return -EINVAL;
		}
		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
			drm_dbg_kms(&i915->drm,
				    "Active planes cannot be changed during async flip\n");
			return -EINVAL;
		}
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		/*
		 * TODO: Async flip is only supported through the page flip IOCTL
		 * as of now. So support currently added for primary plane only.
		 * Support for other planes on platforms on which supports
		 * this(vlv/chv and icl+) should be added when async flip is
		 * enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip)
			return -EINVAL;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 and gen10 platforms to enable
		 * this selectively if required.
		 */
		switch (new_plane_state->hw.fb->modifier) {
		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "Linear memory/CCS does not support async flips\n");
			return -EINVAL;
		}

		if (old_plane_state->color_plane[0].stride !=
		    new_plane_state->color_plane[0].stride) {
			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer modifiers cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer format cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "Plane size/co-ordinates cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "Pixel blend mode cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "Color encoding cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Pull the linked crtc (and its connectors/planes) of every bigjoiner crtc
 * into @state, and kill stale bigjoiner links on crtcs doing a full modeset.
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *linked_crtc_state;
		struct intel_crtc *linked_crtc;
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		linked_crtc = crtc_state->bigjoiner_linked_crtc;
		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
		if (IS_ERR(linked_crtc_state))
			return PTR_ERR(linked_crtc_state);

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		linked_crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &linked_crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, linked_crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
			kill_bigjoiner_slave(state, crtc_state);
	}

	return 0;
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @_state: state to validate
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;
	bool any_ms = false;

	/* a change of the "inherited" (BIOS takeover) flag forces a modeset */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->inherited != old_crtc_state->inherited)
			new_crtc_state->uapi.mode_changed = true;
	}

	intel_vrr_check_modeset(state);

	ret = drm_atomic_helper_check_modeset(dev, &state->base);
	if (ret)
		goto fail;

	ret = intel_bigjoiner_add_affected_crtcs(state);
	if (ret)
		goto fail;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			/* Light copy */
			intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);

			continue;
		}

		if (!new_crtc_state->uapi.enable) {
			if (!new_crtc_state->bigjoiner_slave) {
				intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
				any_ms = true;
			}
			continue;
		}

		ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
		if (ret)
			goto fail;

		ret = intel_modeset_pipe_config(state, new_crtc_state);
		if (ret)
			goto fail;

		ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
						   new_crtc_state);
		if (ret)
			goto fail;
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		ret = intel_modeset_pipe_config_late(new_crtc_state);
		if (ret)
			goto fail;

		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
	}

	/*
	 * Check if fastset is allowed by external dependencies like other
	 * pipes and transcoders.
	 *
	 * Right now it only forces a fullmodeset when the MST master
	 * transcoder did not changed but the pipe of the master transcoder
	 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
	 * in case of port synced crtcs, if one of the synced crtcs
	 * needs a full modeset, all other synced crtcs should be
	 * forced a full modeset.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
			enum transcoder master = new_crtc_state->mst_master_transcoder;

			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
				new_crtc_state->uapi.mode_changed = true;
				new_crtc_state->update_pipe = false;
			}
		}

		if (is_trans_port_sync_mode(new_crtc_state)) {
			u8 trans = new_crtc_state->sync_mode_slaves_mask;

			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
				trans |= BIT(new_crtc_state->master_transcoder);

			if (intel_cpu_transcoders_need_modeset(state, trans)) {
				new_crtc_state->uapi.mode_changed = true;
				new_crtc_state->update_pipe = false;
			}
		}

		if (new_crtc_state->bigjoiner) {
			struct intel_crtc_state *linked_crtc_state =
				intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);

			if (intel_crtc_needs_modeset(linked_crtc_state)) {
				new_crtc_state->uapi.mode_changed = true;
				new_crtc_state->update_pipe = false;
			}
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state)) {
			any_ms = true;
			continue;
		}

		if (!new_crtc_state->update_pipe)
			continue;

		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
	}

	if (any_ms && !check_digital_port_conflicts(state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "rejecting conflicting digital port configuration\n");
		ret = -EINVAL;
		goto fail;
	}

	ret = drm_dp_mst_atomic_check(&state->base);
	if (ret)
		goto fail;

	ret = intel_atomic_check_planes(state);
	if (ret)
		goto fail;

	intel_fbc_choose_crtc(dev_priv, state);
	ret = calc_watermark_data(state);
	if (ret)
		goto fail;

	ret = intel_bw_atomic_check(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check_cdclk(state, &any_ms);
	if (ret)
		goto fail;

	if (any_ms) {
		ret = intel_modeset_checks(state);
		if (ret)
			goto fail;

		/*
		 * NOTE(review): returns directly instead of "goto fail" here,
		 * skipping the "[failed]" config dump below — confirm this
		 * asymmetry is intentional.
		 */
		ret = intel_modeset_calc_cdclk(state);
		if (ret)
			return ret;

		intel_modeset_clear_plls(state);
	}

	ret = intel_atomic_check_crtcs(state);
	if (ret)
		goto fail;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip) {
			ret = intel_atomic_check_async(state);
			if (ret)
				goto fail;
		}

		if (!intel_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->update_pipe)
			continue;

		intel_dump_pipe_config(new_crtc_state, state,
				       intel_crtc_needs_modeset(new_crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	return 0;

fail:
	if (ret == -EDEADLK)
		return ret;

	/*
	 * FIXME would probably be nice to know which crtc specifically
	 * caused the failure, in cases where we can pinpoint it.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dump_pipe_config(new_crtc_state, state, "[failed]");

	return ret;
}

/* Pin framebuffers and pre-build DSB batches ahead of the commit. */
static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
	if (ret < 0)
		return ret;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		bool mode_changed = intel_crtc_needs_modeset(crtc_state);

		if (mode_changed || crtc_state->update_pipe ||
		    crtc_state->uapi.color_mgmt_changed) {
			intel_dsb_prepare(crtc_state);
		}
	}

	return 0;
}

/* Enable FIFO underrun reporting on the cpu (and, if present, pch) side. */
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

	if (crtc_state->has_pch_encoder) {
		enum pipe pch_transcoder =
			intel_crtc_pch_transcoder(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
	}
}

/* Apply the pipe-level state that a fastset (no full modeset) may change. */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}

/* Commit pipe-level config for a fastset, then update watermarks. */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);

		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
	}

	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}

/* Enable @crtc if its new state requires a modeset; no-op otherwise. */
static void intel_enable_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return;

	intel_crtc_update_active_timings(new_crtc_state);

	dev_priv->display.crtc_enable(state, crtc);

	/* bigjoiner slaves have no own vblank — CRC handled via the master */
	if (new_crtc_state->bigjoiner_slave)
		return;

	/* vblanks work again, re-enable pipe CRC. */
	intel_crtc_enable_pipe_crc(crtc);
}

/* Per-crtc plane/pipe update, wrapped in vblank evasion. */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else
		intel_fbc_enable(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets. But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}

/* Disable @crtc (and, for a bigjoiner master, its slave's planes). */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We still need special handling for disabling bigjoiner master
	 * and slaves since for slave we do not have encoder or plls
	 * so we dont need to disable those.
	 */
	if (old_crtc_state->bigjoiner) {
		intel_crtc_disable_planes(state,
					  old_crtc_state->bigjoiner_linked_crtc);
		old_crtc_state->bigjoiner_linked_crtc->active = false;
	}

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}

/* Disable all crtcs doing a full modeset, slaves before their masters. */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)) ||
		    old_crtc_state->bigjoiner_slave)
			continue;

		intel_pre_plane_update(state, crtc);
		if (old_crtc_state->bigjoiner) {
			struct intel_crtc *slave =
				old_crtc_state->bigjoiner_linked_crtc;

			intel_pre_plane_update(state, slave);
		}

		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}

/* Simple enable path: no inter-pipe DDB ordering constraints (pre-skl). */
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		intel_enable_crtc(state, crtc);
		intel_update_crtc(state, crtc);
	}
}

static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for
		   crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			/* Plain update: seed with the old, still-live allocation. */
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first let's enable all pipes that do not need a full modeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/*
			 * Postpone this pipe while its new allocation still
			 * overlaps some other pipe's currently committed one;
			 * the outer while loop retries it on a later pass.
			 */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	/* All remaining work is on the pipes needing a full modeset. */
	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that need a modeset and do not depend on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		/* Dependent pipes are handled in the loop below. */
		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		/* By now no pending allocation may overlap anything committed. */
		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* Every pipe must have been handled by one of the loops above. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}

/*
 * Drop the references held by atomic states parked on the atomic_helper
 * free list (populated from the FENCE_FREE notification).
 */
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}

/* Work item wrapper around intel_atomic_helper_free_state(). */
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);

	intel_atomic_helper_free_state(dev_priv);
}

/*
 * Sleep until either the commit_ready sw fence has signalled or a
 * modeset-affecting GPU reset (I915_RESET_MODESET) has been flagged,
 * whichever happens first.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Arm both waitqueues before testing either condition. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait,
		    &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}

/* Release the DSB of every old CRTC state in this commit. */
static void intel_cleanup_dsbs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dsb_cleanup(old_crtc_state);
}

/*
 * Deferred cleanup for a finished commit: tear down DSBs and planes,
 * signal cleanup_done, drop the state reference and drain the helper
 * free list. Runs from the commit_work item queued at the end of
 * intel_atomic_commit_tail().
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	intel_cleanup_dsbs(state);
	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
	drm_atomic_helper_commit_cleanup_done(&state->base);
	drm_atomic_state_put(&state->base);

	intel_atomic_helper_free_state(i915);
}

/*
 * Read the cached clear color value (ccval) out of the clear-color plane
 * of every framebuffer using the GEN12 render-compression clear-color
 * modifier, so the commit can program it into the hardware.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int ret;

		if (!fb ||
		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at
		 * offset 0 of plane#2):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook
		 * already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[2] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}

/*
 * The heart of the commit: performs the actual hardware programming for
 * an atomic state that has already been swapped in. Runs either inline
 * (blocking commits) or from commit_work (nonblocking commits).
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	/* Wait for fence completion (or a pending GPU reset) before touching hw. */
	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	intel_atomic_prepare_plane_clear_colors(state);

	/*
	 * Grab the power domains each modified CRTC needs; released again
	 * via modeset_put_crtc_power_domains() further down.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been
	   disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now disabled pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	/* Arm flip-done handling for async flips before enabling anything. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		intel_set_cdclk_post_plane_update(state);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously_
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_disable_flip_done(state, crtc);

		/*
		 * Reload the LUTs after the vblank for plain (non-modeset)
		 * pipe/color updates that didn't preload them earlier.
		 */
		if (new_crtc_state->hw.active &&
		    !intel_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		/* Balances modeset_get_crtc_power_domains() done earlier. */
		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup.
		 * So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		/* Balances the POWER_DOMAIN_MODESET get at the top of commit_tail. */
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	/* Balances the runtime pm get done in intel_atomic_commit(). */
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}

/* Work item wrapper: runs the commit tail for nonblocking commits. */
static void intel_atomic_commit_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);

	intel_atomic_commit_tail(state);
}

/*
 * i915_sw_fence notify callback for the commit_ready fence. On
 * FENCE_FREE the state is parked on the atomic_helper free list and
 * freed later from the free_work worker.
 */
static int __i915_sw_fence_call
intel_atomic_commit_ready(struct i915_sw_fence *fence,
			  enum i915_sw_fence_notify notify)
{
	struct intel_atomic_state *state =
		container_of(fence, struct intel_atomic_state, commit_ready);

	switch (notify) {
	case FENCE_COMPLETE:
		/* we do blocking waits in the worker, nothing to do here */
		break;
	case FENCE_FREE:
		{
			struct intel_atomic_helper *helper =
				&to_i915(state->base.dev)->atomic_helper;

			/* Only the first llist_add on an empty list schedules the worker. */
			if (llist_add(&state->freed, &helper->free_list))
				schedule_work(&helper->free_work);
			break;
		}
	}

	return NOTIFY_DONE;
}

/* Move frontbuffer tracking bits from each plane's old fb to its new fb. */
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}

/*
 * Driver implementation of drm_mode_config_funcs.atomic_commit: prepare
 * the commit, swap the state in and either run the tail inline (blocking)
 * or queue it on the appropriate workqueue (nonblocking).
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Released in intel_atomic_commit_tail() (or on the error paths below). */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		i915_sw_fence_commit(&state->commit_ready);

		for_each_new_intel_crtc_in_state(state, crtc,
						 new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Blocking commit: drain pending modesets, then run inline. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}

/*
 * Waitqueue entry used to boost the GPU frequency for a request whose
 * completion missed a vblank (see add_rps_boost_after_vblank()).
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;

	struct drm_crtc *crtc;
	struct i915_request *request;
};

/* Vblank waitqueue callback: boost RPS for the tracked request if needed. */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		intel_rps_boost(rq);
	i915_request_put(rq);

	/* Balances drm_crtc_vblank_get() in add_rps_boost_after_vblank(). */
	drm_crtc_vblank_put(wait->crtc);

	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}

/*
 * Arrange for do_rps_boost() to run at the next vblank so that a flip
 * request which hasn't started by then gets an RPS frequency boost.
 * Silently does nothing for foreign fences, gen < 6, or on allocation /
 * vblank-reference failure (best effort).
 */
static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
				       struct dma_fence *fence)
{
	struct wait_rps_boost *wait;

	if (!dma_fence_is_i915(fence))
		return;

	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
		return;

	if (drm_crtc_vblank_get(crtc))
		return;

	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait) {
		drm_crtc_vblank_put(crtc);
		return;
	}

	/* Reference dropped in do_rps_boost(). */
	wait->request = to_request(dma_fence_get(fence));
	wait->crtc = crtc;

	wait->wait.func = do_rps_boost;
	wait->wait.flags = 0;

	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}

/*
 * Pin (and fence, where applicable) the framebuffer backing a plane
 * state, storing the resulting vma in plane_state->vma. Physical-address
 * cursors get their backing object attached to phys memory first.
 * Returns 0 on success, negative error code on failure.
 */
int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct i915_vma *vma;

	if (plane->id == PLANE_CURSOR &&
	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
		const int align = intel_cursor_alignment(dev_priv);
		int err;

		err = i915_gem_object_attach_phys(obj, align);
		if (err)
			return err;
	}

	vma = intel_pin_and_fence_fb_obj(fb,
					 &plane_state->view,
					 intel_plane_uses_fence(plane_state),
					 &plane_state->flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	plane_state->vma = vma;

	return 0;
}

/* Unpin the vma pinned by intel_plane_pin_fb(), if any. */
void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
	struct i915_vma *vma;

	vma =
	      fetch_and_zero(&old_plane_state->vma);
	if (vma)
		intel_unpin_fb_vma(vma, old_plane_state->flags);
}

/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @_plane: drm plane to prepare for
 * @_new_plane_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane. Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits. Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *_plane,
		       struct drm_plane_state *_new_plane_state)
{
	/* Flips are latency sensitive: bump the priority of their work. */
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_plane_state *new_plane_state =
		to_intel_plane_state(_new_plane_state);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
	int ret;

	if (old_obj) {
		const struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(state,
							to_intel_crtc(old_plane_state->hw.crtc));

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on.
		 * If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (intel_crtc_needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&state->commit_ready,
							      old_obj->base.resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_plane_state->uapi.fence) { /* explicit fencing */
		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
					     &attr);
		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
						    new_plane_state->uapi.fence,
						    i915_fence_timeout(dev_priv),
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	if (!obj)
		return 0;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	ret = intel_plane_pin_fb(new_plane_state);

	/* Pages stay resident via the fb pin; drop our temporary reference. */
	i915_gem_object_unpin_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_wait_priority(obj, 0, &attr);
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);

	if (!new_plane_state->uapi.fence) { /* implicit fencing */
		struct dma_fence *fence;

		ret = i915_sw_fence_await_reservation(&state->commit_ready,
						      obj->base.resv, NULL,
						      false,
						      i915_fence_timeout(dev_priv),
						      GFP_KERNEL);
		if (ret < 0)
			goto unpin_fb;

		fence = dma_resv_get_excl_rcu(obj->base.resv);
		if (fence) {
			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
						   fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
					   new_plane_state->uapi.fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time.
	 * By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
	if (!state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
		state->rps_interactive = true;
	}

	return 0;

unpin_fb:
	intel_plane_unpin_fb(new_plane_state);

	return ret;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @_old_plane_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *_old_plane_state)
{
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(_old_plane_state);
	struct intel_atomic_state *state =
		to_intel_atomic_state(old_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);

	if (!obj)
		return;

	/* Balances the interactive marking done in intel_prepare_plane_fb(). */
	if (state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
		state->rps_interactive = false;
	}

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	intel_plane_unpin_fb(old_plane_state);
}

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}

/* Restrict each plane to the single CRTC of the pipe it belongs to. */
static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
								  plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}


/*
 * DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID: translate a CRTC object id into
 * the hardware pipe enum for userspace. Returns -ENOENT for an unknown id.
 */
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

/* Build the mask of encoders that can be cloned with @encoder. */
static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	u32 possible_clones = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			possible_clones |= drm_encoder_mask(&source_encoder->base);
	}

	return possible_clones;
}

/* Translate the encoder's pipe_mask into a mask of CRTC ids. */
static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc;
	u32 possible_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		if (encoder->pipe_mask & BIT(crtc->pipe))
			possible_crtcs |= drm_crtc_mask(&crtc->base);
	}

	return possible_crtcs;
}

/* Whether eDP on port A is present on this ILK-era platform. */
static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return
		       false;

	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
		return false;

	/* On gen5 a fuse strap can disable eDP A even when detected. */
	if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/*
 * Whether an analog CRT connector should be registered on a DDI
 * (HSW/BDW/SKL-era) platform, based on platform generation, SKU,
 * fuse straps and VBT.
 */
static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
	} else if (INTEL_GEN(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		intel_ddi_init(dev_priv, PORT_TC5);
		intel_ddi_init(dev_priv, PORT_TC6);
		icl_dsi_init(dev_priv);
	} else if (IS_JSL_EHL(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else
if (IS_GEN(dev_priv, 11)) { 13935 intel_ddi_init(dev_priv, PORT_A); 13936 intel_ddi_init(dev_priv, PORT_B); 13937 intel_ddi_init(dev_priv, PORT_C); 13938 intel_ddi_init(dev_priv, PORT_D); 13939 intel_ddi_init(dev_priv, PORT_E); 13940 /* 13941 * On some ICL SKUs port F is not present. No strap bits for 13942 * this, so rely on VBT. 13943 * Work around broken VBTs on SKUs known to have no port F. 13944 */ 13945 if (IS_ICL_WITH_PORT_F(dev_priv) && 13946 intel_bios_is_port_present(dev_priv, PORT_F)) 13947 intel_ddi_init(dev_priv, PORT_F); 13948 13949 icl_dsi_init(dev_priv); 13950 } else if (IS_GEN9_LP(dev_priv)) { 13951 /* 13952 * FIXME: Broxton doesn't support port detection via the 13953 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to 13954 * detect the ports. 13955 */ 13956 intel_ddi_init(dev_priv, PORT_A); 13957 intel_ddi_init(dev_priv, PORT_B); 13958 intel_ddi_init(dev_priv, PORT_C); 13959 13960 vlv_dsi_init(dev_priv); 13961 } else if (HAS_DDI(dev_priv)) { 13962 int found; 13963 13964 if (intel_ddi_crt_present(dev_priv)) 13965 intel_crt_init(dev_priv); 13966 13967 /* 13968 * Haswell uses DDI functions to detect digital outputs. 13969 * On SKL pre-D0 the strap isn't connected, so we assume 13970 * it's there. 
13971 */ 13972 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 13973 /* WaIgnoreDDIAStrap: skl */ 13974 if (found || IS_GEN9_BC(dev_priv)) 13975 intel_ddi_init(dev_priv, PORT_A); 13976 13977 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP 13978 * register */ 13979 found = intel_de_read(dev_priv, SFUSE_STRAP); 13980 13981 if (found & SFUSE_STRAP_DDIB_DETECTED) 13982 intel_ddi_init(dev_priv, PORT_B); 13983 if (found & SFUSE_STRAP_DDIC_DETECTED) 13984 intel_ddi_init(dev_priv, PORT_C); 13985 if (found & SFUSE_STRAP_DDID_DETECTED) 13986 intel_ddi_init(dev_priv, PORT_D); 13987 if (found & SFUSE_STRAP_DDIF_DETECTED) 13988 intel_ddi_init(dev_priv, PORT_F); 13989 /* 13990 * On SKL we don't have a way to detect DDI-E so we rely on VBT. 13991 */ 13992 if (IS_GEN9_BC(dev_priv) && 13993 intel_bios_is_port_present(dev_priv, PORT_E)) 13994 intel_ddi_init(dev_priv, PORT_E); 13995 13996 } else if (HAS_PCH_SPLIT(dev_priv)) { 13997 int found; 13998 13999 /* 14000 * intel_edp_init_connector() depends on this completing first, 14001 * to prevent the registration of both eDP and LVDS and the 14002 * incorrect sharing of the PPS. 
14003 */ 14004 intel_lvds_init(dev_priv); 14005 intel_crt_init(dev_priv); 14006 14007 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); 14008 14009 if (ilk_has_edp_a(dev_priv)) 14010 intel_dp_init(dev_priv, DP_A, PORT_A); 14011 14012 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) { 14013 /* PCH SDVOB multiplex with HDMIB */ 14014 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 14015 if (!found) 14016 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 14017 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED)) 14018 intel_dp_init(dev_priv, PCH_DP_B, PORT_B); 14019 } 14020 14021 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED) 14022 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 14023 14024 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED) 14025 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 14026 14027 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED) 14028 intel_dp_init(dev_priv, PCH_DP_C, PORT_C); 14029 14030 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED) 14031 intel_dp_init(dev_priv, PCH_DP_D, PORT_D); 14032 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 14033 bool has_edp, has_port; 14034 14035 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support) 14036 intel_crt_init(dev_priv); 14037 14038 /* 14039 * The DP_DETECTED bit is the latched state of the DDC 14040 * SDA pin at boot. However since eDP doesn't require DDC 14041 * (no way to plug in a DP->HDMI dongle) the DDC pins for 14042 * eDP ports may have been muxed to an alternate function. 14043 * Thus we can't rely on the DP_DETECTED bit alone to detect 14044 * eDP ports. Consult the VBT as well as DP_DETECTED to 14045 * detect eDP ports. 14046 * 14047 * Sadly the straps seem to be missing sometimes even for HDMI 14048 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap 14049 * and VBT for the presence of the port. 
Additionally we can't 14050 * trust the port type the VBT declares as we've seen at least 14051 * HDMI ports that the VBT claim are DP or eDP. 14052 */ 14053 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); 14054 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 14055 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port) 14056 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); 14057 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 14058 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 14059 14060 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); 14061 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 14062 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port) 14063 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); 14064 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 14065 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 14066 14067 if (IS_CHERRYVIEW(dev_priv)) { 14068 /* 14069 * eDP not supported on port D, 14070 * so no need to worry about it 14071 */ 14072 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 14073 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port) 14074 intel_dp_init(dev_priv, CHV_DP_D, PORT_D); 14075 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port) 14076 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 14077 } 14078 14079 vlv_dsi_init(dev_priv); 14080 } else if (IS_PINEVIEW(dev_priv)) { 14081 intel_lvds_init(dev_priv); 14082 intel_crt_init(dev_priv); 14083 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) { 14084 bool found = false; 14085 14086 if (IS_MOBILE(dev_priv)) 14087 intel_lvds_init(dev_priv); 14088 14089 intel_crt_init(dev_priv); 14090 14091 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 14092 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n"); 14093 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 14094 if (!found && IS_G4X(dev_priv)) { 14095 drm_dbg_kms(&dev_priv->drm, 14096 "probing HDMI on 
SDVOB\n"); 14097 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 14098 } 14099 14100 if (!found && IS_G4X(dev_priv)) 14101 intel_dp_init(dev_priv, DP_B, PORT_B); 14102 } 14103 14104 /* Before G4X SDVOC doesn't have its own detect register */ 14105 14106 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 14107 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n"); 14108 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 14109 } 14110 14111 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) { 14112 14113 if (IS_G4X(dev_priv)) { 14114 drm_dbg_kms(&dev_priv->drm, 14115 "probing HDMI on SDVOC\n"); 14116 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 14117 } 14118 if (IS_G4X(dev_priv)) 14119 intel_dp_init(dev_priv, DP_C, PORT_C); 14120 } 14121 14122 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED)) 14123 intel_dp_init(dev_priv, DP_D, PORT_D); 14124 14125 if (SUPPORTS_TV(dev_priv)) 14126 intel_tv_init(dev_priv); 14127 } else if (IS_GEN(dev_priv, 2)) { 14128 if (IS_I85X(dev_priv)) 14129 intel_lvds_init(dev_priv); 14130 14131 intel_crt_init(dev_priv); 14132 intel_dvo_init(dev_priv); 14133 } 14134 14135 intel_psr_init(dev_priv); 14136 14137 for_each_intel_encoder(&dev_priv->drm, encoder) { 14138 encoder->base.possible_crtcs = 14139 intel_encoder_possible_crtcs(encoder); 14140 encoder->base.possible_clones = 14141 intel_encoder_possible_clones(encoder); 14142 } 14143 14144 intel_init_pch_refclk(dev_priv); 14145 14146 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 14147 } 14148 14149 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 14150 { 14151 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14152 14153 drm_framebuffer_cleanup(fb); 14154 intel_frontbuffer_put(intel_fb->frontbuffer); 14155 14156 kfree(intel_fb); 14157 } 14158 14159 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 14160 struct drm_file *file, 14161 unsigned int *handle) 14162 { 14163 struct 
	drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* userptr-backed objects are not allowed as framebuffers */
	if (obj->userptr.mm) {
		drm_dbg(&i915->drm,
			"attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}

/* .dirty hook: flush CPU writes and kick frontbuffer tracking. */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	return 0;
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};

/*
 * Validate a framebuffer (tiling vs. modifier, pixel format, stride limits,
 * offsets and per-plane alignment) and register it with the drm core.
 * Takes a frontbuffer tracking reference that is dropped again on error.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's tiling/stride under its lock */
	i915_gem_object_lock(obj, NULL);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the tiling mode */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %s / modifier 0x%llx\n",
			    drm_get_format_name(mode_cmd->pixel_format,
						&format_name),
			    mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		/* All color planes must come from the same GEM object */
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		/* gen12 CCS aux planes (except the CC plane) have a fixed pitch */
		if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}

/* .fb_create hook: look up the GEM object and wrap it in an intel fb. */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const
	struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	/* Work on a copy so intel_framebuffer_init() may adjust the modifier */
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(obj, &mode_cmd);
	i915_gem_object_put(obj);

	return fb;
}

/*
 * Device-wide mode validation: reject mode flags no pipe supports and
 * timings exceeding the per-generation transcoder limits.
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (INTEL_GEN(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	/* Minimum active/blanking sizes differ before/after gen5 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	return MODE_OK;
}

enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode,
				bool bigjoiner)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertize modes that are
	 * too big for that.
	 */
	if (INTEL_GEN(dev_priv) >= 11) {
		/* bigjoiner doubles the usable plane width */
		plane_width_max = 5120 << bigjoiner;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	intel_dpll_init_clock_hook(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		/* NOTE: same hooks as the gen9+ branch above */
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
		dev_priv->display.crtc_enable = ilk_crtc_enable;
		dev_priv->display.crtc_disable = ilk_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv) ||
		   IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	intel_fdi_init_hook(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
		dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
	} else {
		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
		dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
	}

}

/* Read out the current cdclk configuration and seed the cdclk state. */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(i915->cdclk.obj.state);

	intel_update_cdclk(i915);
	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
}

/*
 * Pull every crtc and plane into @state so the subsequent atomic check
 * recomputes watermarks for the whole device.
 */
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_crtc(state->dev, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (crtc_state->hw.active) {
			/*
			 * Preserve the inherited flag to avoid
			 * taking the full modeset path.
			 */
			crtc_state->inherited = true;
		}
	}

	drm_for_each_plane(plane, state->dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	/* Deadlock from the locking ctx: back off and retry the whole thing */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements. This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform. Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

/* Cache the FDI PLL frequency (gen5 reads it from FDI_PLL_BIOS_0). */
static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 5)) {
		u32 fdi_pll_clk =
			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
		dev_priv->fdi_pll_freq = 270000;
	} else {
		/* no FDI on other platforms; leave fdi_pll_freq untouched */
		return;
	}

	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
}

/*
 * Commit the state inherited from the BIOS once at init so all active
 * planes/crtcs have fully computed software state.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Locking contention: drop everything and retry */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

/* Set up drm_mode_config limits and hooks for this device. */
static void intel_mode_config_init(struct drm_i915_private *i915)
{
	struct drm_mode_config *mode_config = &i915->drm.mode_config;

	drm_mode_config_init(&i915->drm);
	INIT_LIST_HEAD(&i915->global_obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->allow_fb_modifiers = true;

	mode_config->funcs = &intel_mode_funcs;

	mode_config->async_page_flip = has_async_flips(i915);

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (INTEL_GEN(i915) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (INTEL_GEN(i915) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (IS_GEN(i915, 3)) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	/* Cursor size limits also vary by platform */
	if (IS_I845G(i915) || IS_I865G(i915)) {
		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (IS_I830(i915) || IS_I85X(i915) ||
		   IS_I915G(i915) || IS_I915GM(i915)) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}

/* Tear down what intel_mode_config_init() set up. */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}

/* Release the fb and vma references held by an initial plane config. */
static void plane_config_fini(struct intel_initial_plane_config *plane_config)
{
	if (plane_config->fb) {
		struct drm_framebuffer *fb = &plane_config->fb->base;

		/* We may only have the stub and not a full framebuffer */
		if (drm_framebuffer_read_refcount(fb))
			drm_framebuffer_put(fb);
		else
			kfree(fb);
	}

	if (plane_config->vma)
		i915_vma_put(plane_config->vma);
}

/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	intel_csr_ucode_init(i915);

	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	i915->framestart_delay = 1; /* 1-4 */

	intel_mode_config_init(i915);

	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_csr:
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}

/* part #2: call after irq install, but before gem init */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	if (HAS_DISPLAY(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				intel_mode_config_cleanup(i915);
				return ret;
			}
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}

/* part #3: call after gem init */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up.
*/ 15022 intel_hpd_init(i915); 15023 intel_hpd_poll_disable(i915); 15024 15025 intel_init_ipc(i915); 15026 15027 return 0; 15028 } 15029 15030 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 15031 { 15032 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15033 /* 640x480@60Hz, ~25175 kHz */ 15034 struct dpll clock = { 15035 .m1 = 18, 15036 .m2 = 7, 15037 .p1 = 13, 15038 .p2 = 4, 15039 .n = 2, 15040 }; 15041 u32 dpll, fp; 15042 int i; 15043 15044 drm_WARN_ON(&dev_priv->drm, 15045 i9xx_calc_dpll_params(48000, &clock) != 25154); 15046 15047 drm_dbg_kms(&dev_priv->drm, 15048 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n", 15049 pipe_name(pipe), clock.vco, clock.dot); 15050 15051 fp = i9xx_dpll_compute_fp(&clock); 15052 dpll = DPLL_DVO_2X_MODE | 15053 DPLL_VGA_MODE_DIS | 15054 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | 15055 PLL_P2_DIVIDE_BY_4 | 15056 PLL_REF_INPUT_DREFCLK | 15057 DPLL_VCO_ENABLE; 15058 15059 intel_de_write(dev_priv, FP0(pipe), fp); 15060 intel_de_write(dev_priv, FP1(pipe), fp); 15061 15062 intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); 15063 intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); 15064 intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); 15065 intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); 15066 intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); 15067 intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); 15068 intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); 15069 15070 /* 15071 * Apparently we need to have VGA mode enabled prior to changing 15072 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 15073 * dividers, even though the register value does change. 
15074 */ 15075 intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); 15076 intel_de_write(dev_priv, DPLL(pipe), dpll); 15077 15078 /* Wait for the clocks to stabilize. */ 15079 intel_de_posting_read(dev_priv, DPLL(pipe)); 15080 udelay(150); 15081 15082 /* The pixel multiplier can only be updated once the 15083 * DPLL is enabled and the clocks are stable. 15084 * 15085 * So write it again. 15086 */ 15087 intel_de_write(dev_priv, DPLL(pipe), dpll); 15088 15089 /* We do this three times for luck */ 15090 for (i = 0; i < 3 ; i++) { 15091 intel_de_write(dev_priv, DPLL(pipe), dpll); 15092 intel_de_posting_read(dev_priv, DPLL(pipe)); 15093 udelay(150); /* wait for warmup */ 15094 } 15095 15096 intel_de_write(dev_priv, PIPECONF(pipe), 15097 PIPECONF_ENABLE | PIPECONF_PROGRESSIVE); 15098 intel_de_posting_read(dev_priv, PIPECONF(pipe)); 15099 15100 intel_wait_for_pipe_scanline_moving(crtc); 15101 } 15102 15103 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 15104 { 15105 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15106 15107 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n", 15108 pipe_name(pipe)); 15109 15110 drm_WARN_ON(&dev_priv->drm, 15111 intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & 15112 DISPLAY_PLANE_ENABLE); 15113 drm_WARN_ON(&dev_priv->drm, 15114 intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & 15115 DISPLAY_PLANE_ENABLE); 15116 drm_WARN_ON(&dev_priv->drm, 15117 intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & 15118 DISPLAY_PLANE_ENABLE); 15119 drm_WARN_ON(&dev_priv->drm, 15120 intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE); 15121 drm_WARN_ON(&dev_priv->drm, 15122 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE); 15123 15124 intel_de_write(dev_priv, PIPECONF(pipe), 0); 15125 intel_de_posting_read(dev_priv, PIPECONF(pipe)); 15126 15127 intel_wait_for_pipe_scanline_stopped(crtc); 15128 15129 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); 15130 
intel_de_posting_read(dev_priv, DPLL(pipe)); 15131 } 15132 15133 static void 15134 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) 15135 { 15136 struct intel_crtc *crtc; 15137 15138 if (INTEL_GEN(dev_priv) >= 4) 15139 return; 15140 15141 for_each_intel_crtc(&dev_priv->drm, crtc) { 15142 struct intel_plane *plane = 15143 to_intel_plane(crtc->base.primary); 15144 struct intel_crtc *plane_crtc; 15145 enum pipe pipe; 15146 15147 if (!plane->get_hw_state(plane, &pipe)) 15148 continue; 15149 15150 if (pipe == crtc->pipe) 15151 continue; 15152 15153 drm_dbg_kms(&dev_priv->drm, 15154 "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", 15155 plane->base.base.id, plane->base.name); 15156 15157 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15158 intel_plane_disable_noatomic(plane_crtc, plane); 15159 } 15160 } 15161 15162 static bool intel_crtc_has_encoders(struct intel_crtc *crtc) 15163 { 15164 struct drm_device *dev = crtc->base.dev; 15165 struct intel_encoder *encoder; 15166 15167 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 15168 return true; 15169 15170 return false; 15171 } 15172 15173 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder) 15174 { 15175 struct drm_device *dev = encoder->base.dev; 15176 struct intel_connector *connector; 15177 15178 for_each_connector_on_encoder(dev, &encoder->base, connector) 15179 return connector; 15180 15181 return NULL; 15182 } 15183 15184 static bool has_pch_trancoder(struct drm_i915_private *dev_priv, 15185 enum pipe pch_transcoder) 15186 { 15187 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 15188 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A); 15189 } 15190 15191 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state) 15192 { 15193 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 15194 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 15195 enum transcoder cpu_transcoder = 
crtc_state->cpu_transcoder; 15196 15197 if (INTEL_GEN(dev_priv) >= 9 || 15198 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 15199 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); 15200 u32 val; 15201 15202 if (transcoder_is_dsi(cpu_transcoder)) 15203 return; 15204 15205 val = intel_de_read(dev_priv, reg); 15206 val &= ~HSW_FRAME_START_DELAY_MASK; 15207 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 15208 intel_de_write(dev_priv, reg, val); 15209 } else { 15210 i915_reg_t reg = PIPECONF(cpu_transcoder); 15211 u32 val; 15212 15213 val = intel_de_read(dev_priv, reg); 15214 val &= ~PIPECONF_FRAME_START_DELAY_MASK; 15215 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 15216 intel_de_write(dev_priv, reg, val); 15217 } 15218 15219 if (!crtc_state->has_pch_encoder) 15220 return; 15221 15222 if (HAS_PCH_IBX(dev_priv)) { 15223 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe); 15224 u32 val; 15225 15226 val = intel_de_read(dev_priv, reg); 15227 val &= ~TRANS_FRAME_START_DELAY_MASK; 15228 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 15229 intel_de_write(dev_priv, reg, val); 15230 } else { 15231 enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc); 15232 i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder); 15233 u32 val; 15234 15235 val = intel_de_read(dev_priv, reg); 15236 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 15237 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 15238 intel_de_write(dev_priv, reg, val); 15239 } 15240 } 15241 15242 static void intel_sanitize_crtc(struct intel_crtc *crtc, 15243 struct drm_modeset_acquire_ctx *ctx) 15244 { 15245 struct drm_device *dev = crtc->base.dev; 15246 struct drm_i915_private *dev_priv = to_i915(dev); 15247 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); 15248 15249 if (crtc_state->hw.active) { 15250 struct intel_plane *plane; 15251 15252 /* Clear any frame start delays used for debugging left by the BIOS */ 15253 
intel_sanitize_frame_start_delay(crtc_state); 15254 15255 /* Disable everything but the primary plane */ 15256 for_each_intel_plane_on_crtc(dev, crtc, plane) { 15257 const struct intel_plane_state *plane_state = 15258 to_intel_plane_state(plane->base.state); 15259 15260 if (plane_state->uapi.visible && 15261 plane->base.type != DRM_PLANE_TYPE_PRIMARY) 15262 intel_plane_disable_noatomic(crtc, plane); 15263 } 15264 15265 /* 15266 * Disable any background color set by the BIOS, but enable the 15267 * gamma and CSC to match how we program our planes. 15268 */ 15269 if (INTEL_GEN(dev_priv) >= 9) 15270 intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe), 15271 SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE); 15272 } 15273 15274 /* Adjust the state of the output pipe according to whether we 15275 * have active connectors/encoders. */ 15276 if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) && 15277 !crtc_state->bigjoiner_slave) 15278 intel_crtc_disable_noatomic(crtc, ctx); 15279 15280 if (crtc_state->hw.active || HAS_GMCH(dev_priv)) { 15281 /* 15282 * We start out with underrun reporting disabled to avoid races. 15283 * For correct bookkeeping mark this on active crtcs. 15284 * 15285 * Also on gmch platforms we dont have any hardware bits to 15286 * disable the underrun reporting. Which means we need to start 15287 * out with underrun reporting disabled also on inactive pipes, 15288 * since otherwise we'll complain about the garbage we read when 15289 * e.g. coming up after runtime pm. 15290 * 15291 * No protection against concurrent access is required - at 15292 * worst a fifo underrun happens which also sets this to false. 15293 */ 15294 crtc->cpu_fifo_underrun_disabled = true; 15295 /* 15296 * We track the PCH trancoder underrun reporting state 15297 * within the crtc. With crtc for pipe A housing the underrun 15298 * reporting state for PCH transcoder A, crtc for pipe B housing 15299 * it for PCH transcoder B, etc. 
LPT-H has only PCH transcoder A, 15300 * and marking underrun reporting as disabled for the non-existing 15301 * PCH transcoders B and C would prevent enabling the south 15302 * error interrupt (see cpt_can_enable_serr_int()). 15303 */ 15304 if (has_pch_trancoder(dev_priv, crtc->pipe)) 15305 crtc->pch_fifo_underrun_disabled = true; 15306 } 15307 } 15308 15309 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) 15310 { 15311 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 15312 15313 /* 15314 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram 15315 * the hardware when a high res displays plugged in. DPLL P 15316 * divider is zero, and the pipe timings are bonkers. We'll 15317 * try to disable everything in that case. 15318 * 15319 * FIXME would be nice to be able to sanitize this state 15320 * without several WARNs, but for now let's take the easy 15321 * road. 15322 */ 15323 return IS_GEN(dev_priv, 6) && 15324 crtc_state->hw.active && 15325 crtc_state->shared_dpll && 15326 crtc_state->port_clock == 0; 15327 } 15328 15329 static void intel_sanitize_encoder(struct intel_encoder *encoder) 15330 { 15331 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 15332 struct intel_connector *connector; 15333 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 15334 struct intel_crtc_state *crtc_state = crtc ? 15335 to_intel_crtc_state(crtc->base.state) : NULL; 15336 15337 /* We need to check both for a crtc link (meaning that the 15338 * encoder is active and trying to read from a pipe) and the 15339 * pipe itself being active. */ 15340 bool has_active_crtc = crtc_state && 15341 crtc_state->hw.active; 15342 15343 if (crtc_state && has_bogus_dpll_config(crtc_state)) { 15344 drm_dbg_kms(&dev_priv->drm, 15345 "BIOS has misprogrammed the hardware. 
Disabling pipe %c\n", 15346 pipe_name(crtc->pipe)); 15347 has_active_crtc = false; 15348 } 15349 15350 connector = intel_encoder_find_connector(encoder); 15351 if (connector && !has_active_crtc) { 15352 drm_dbg_kms(&dev_priv->drm, 15353 "[ENCODER:%d:%s] has active connectors but no active pipe!\n", 15354 encoder->base.base.id, 15355 encoder->base.name); 15356 15357 /* Connector is active, but has no active pipe. This is 15358 * fallout from our resume register restoring. Disable 15359 * the encoder manually again. */ 15360 if (crtc_state) { 15361 struct drm_encoder *best_encoder; 15362 15363 drm_dbg_kms(&dev_priv->drm, 15364 "[ENCODER:%d:%s] manually disabled\n", 15365 encoder->base.base.id, 15366 encoder->base.name); 15367 15368 /* avoid oopsing in case the hooks consult best_encoder */ 15369 best_encoder = connector->base.state->best_encoder; 15370 connector->base.state->best_encoder = &encoder->base; 15371 15372 /* FIXME NULL atomic state passed! */ 15373 if (encoder->disable) 15374 encoder->disable(NULL, encoder, crtc_state, 15375 connector->base.state); 15376 if (encoder->post_disable) 15377 encoder->post_disable(NULL, encoder, crtc_state, 15378 connector->base.state); 15379 15380 connector->base.state->best_encoder = best_encoder; 15381 } 15382 encoder->base.crtc = NULL; 15383 15384 /* Inconsistent output/port/pipe state happens presumably due to 15385 * a bug in one of the get_hw_state functions. Or someplace else 15386 * in our code, like the register restore mess on resume. Clamp 15387 * things to off as a safer default. 
*/ 15388 15389 connector->base.dpms = DRM_MODE_DPMS_OFF; 15390 connector->base.encoder = NULL; 15391 } 15392 15393 /* notify opregion of the sanitized encoder state */ 15394 intel_opregion_notify_encoder(encoder, connector && has_active_crtc); 15395 15396 if (INTEL_GEN(dev_priv) >= 11) 15397 icl_sanitize_encoder_pll_mapping(encoder); 15398 } 15399 15400 /* FIXME read out full plane state for all planes */ 15401 static void readout_plane_state(struct drm_i915_private *dev_priv) 15402 { 15403 struct intel_plane *plane; 15404 struct intel_crtc *crtc; 15405 15406 for_each_intel_plane(&dev_priv->drm, plane) { 15407 struct intel_plane_state *plane_state = 15408 to_intel_plane_state(plane->base.state); 15409 struct intel_crtc_state *crtc_state; 15410 enum pipe pipe = PIPE_A; 15411 bool visible; 15412 15413 visible = plane->get_hw_state(plane, &pipe); 15414 15415 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15416 crtc_state = to_intel_crtc_state(crtc->base.state); 15417 15418 intel_set_plane_visible(crtc_state, plane_state, visible); 15419 15420 drm_dbg_kms(&dev_priv->drm, 15421 "[PLANE:%d:%s] hw state readout: %s, pipe %c\n", 15422 plane->base.base.id, plane->base.name, 15423 enableddisabled(visible), pipe_name(pipe)); 15424 } 15425 15426 for_each_intel_crtc(&dev_priv->drm, crtc) { 15427 struct intel_crtc_state *crtc_state = 15428 to_intel_crtc_state(crtc->base.state); 15429 15430 fixup_plane_bitmasks(crtc_state); 15431 } 15432 } 15433 15434 static void intel_modeset_readout_hw_state(struct drm_device *dev) 15435 { 15436 struct drm_i915_private *dev_priv = to_i915(dev); 15437 struct intel_cdclk_state *cdclk_state = 15438 to_intel_cdclk_state(dev_priv->cdclk.obj.state); 15439 struct intel_dbuf_state *dbuf_state = 15440 to_intel_dbuf_state(dev_priv->dbuf.obj.state); 15441 enum pipe pipe; 15442 struct intel_crtc *crtc; 15443 struct intel_encoder *encoder; 15444 struct intel_connector *connector; 15445 struct drm_connector_list_iter conn_iter; 15446 u8 active_pipes = 0; 
/*
 * Read the current display hardware state (pipes, planes, DPLLs, encoders,
 * connectors) into the atomic software state, then derive the dependent
 * state: active pipe masks, connector/encoder masks, cdclk and bandwidth
 * requirements, and bigjoiner master/slave linkage.  Runs with all modeset
 * locks held during driver load/resume.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Start from a clean crtc state before reading out. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		intel_crtc_get_pipe_config(crtc_state);

		crtc_state->hw.enable = crtc_state->hw.active;

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	/* Mirror the active pipe mask into the cdclk and dbuf global state. */
	dev_priv->active_pipes = cdclk_state->active_pipes =
		dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	intel_dpll_readout_hw_state(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			intel_encoder_get_config(encoder, crtc_state);
			if (encoder->sync_state)
				encoder->sync_state(encoder, crtc_state);

			/* read out to slave crtc as well for bigjoiner */
			if (crtc_state->bigjoiner) {
				/* encoder should be linked to the bigjoiner master */
				WARN_ON(crtc_state->bigjoiner_slave);

				crtc = crtc_state->bigjoiner_linked_crtc;
				crtc_state = to_intel_crtc_state(crtc->base.state);
				intel_encoder_get_config(encoder, crtc_state);
			}
		} else {
			encoder->base.crtc = NULL;
		}

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive per-crtc cdclk/bandwidth requirements from the readout. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		/* Slaves are handled below via their bigjoiner master. */
		if (crtc_state->bigjoiner_slave)
			continue;

		if (crtc_state->hw.active) {
			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);

		/* discard our incomplete slave state, copy it from master */
		if (crtc_state->bigjoiner && crtc_state->hw.active) {
			struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
			struct intel_crtc_state *slave_crtc_state =
				to_intel_crtc_state(slave->base.state);

			copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
			slave->base.mode = crtc->base.mode;

			cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
			cdclk_state->min_voltage_level[slave->pipe] =
				crtc_state->min_voltage_level;

			for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
				const struct intel_plane_state *plane_state =
					to_intel_plane_state(plane->base.state);

				/*
				 * FIXME don't have the fb yet, so can't
				 * use intel_plane_data_rate() :(
				 */
				if (plane_state->uapi.visible)
					crtc_state->data_rate[plane->id] =
						4 * crtc_state->pixel_rate;
				else
					crtc_state->data_rate[plane->id] = 0;
			}

			intel_bw_crtc_update(bw_state, slave_crtc_state);
			drm_calc_timestamping_constants(&slave->base,
							&slave_crtc_state->hw.adjusted_mode);
		}
	}
}
intel_crtc_state *slave_crtc_state = 15625 to_intel_crtc_state(slave->base.state); 15626 15627 copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state); 15628 slave->base.mode = crtc->base.mode; 15629 15630 cdclk_state->min_cdclk[slave->pipe] = min_cdclk; 15631 cdclk_state->min_voltage_level[slave->pipe] = 15632 crtc_state->min_voltage_level; 15633 15634 for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) { 15635 const struct intel_plane_state *plane_state = 15636 to_intel_plane_state(plane->base.state); 15637 15638 /* 15639 * FIXME don't have the fb yet, so can't 15640 * use intel_plane_data_rate() :( 15641 */ 15642 if (plane_state->uapi.visible) 15643 crtc_state->data_rate[plane->id] = 15644 4 * crtc_state->pixel_rate; 15645 else 15646 crtc_state->data_rate[plane->id] = 0; 15647 } 15648 15649 intel_bw_crtc_update(bw_state, slave_crtc_state); 15650 drm_calc_timestamping_constants(&slave->base, 15651 &slave_crtc_state->hw.adjusted_mode); 15652 } 15653 } 15654 } 15655 15656 static void 15657 get_encoder_power_domains(struct drm_i915_private *dev_priv) 15658 { 15659 struct intel_encoder *encoder; 15660 15661 for_each_intel_encoder(&dev_priv->drm, encoder) { 15662 struct intel_crtc_state *crtc_state; 15663 15664 if (!encoder->get_power_domains) 15665 continue; 15666 15667 /* 15668 * MST-primary and inactive encoders don't have a crtc state 15669 * and neither of these require any power domain references. 15670 */ 15671 if (!encoder->base.crtc) 15672 continue; 15673 15674 crtc_state = to_intel_crtc_state(encoder->base.crtc->state); 15675 encoder->get_power_domains(encoder, crtc_state); 15676 } 15677 } 15678 15679 static void intel_early_display_was(struct drm_i915_private *dev_priv) 15680 { 15681 /* 15682 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl 15683 * Also known as Wa_14010480278. 
15684 */ 15685 if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv)) 15686 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0, 15687 intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS); 15688 15689 if (IS_HASWELL(dev_priv)) { 15690 /* 15691 * WaRsPkgCStateDisplayPMReq:hsw 15692 * System hang if this isn't done before disabling all planes! 15693 */ 15694 intel_de_write(dev_priv, CHICKEN_PAR1_1, 15695 intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); 15696 } 15697 15698 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) { 15699 /* Display WA #1142:kbl,cfl,cml */ 15700 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, 15701 KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22); 15702 intel_de_rmw(dev_priv, CHICKEN_MISC_2, 15703 KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14, 15704 KBL_ARB_FILL_SPARE_14); 15705 } 15706 } 15707 15708 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv, 15709 enum port port, i915_reg_t hdmi_reg) 15710 { 15711 u32 val = intel_de_read(dev_priv, hdmi_reg); 15712 15713 if (val & SDVO_ENABLE || 15714 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A)) 15715 return; 15716 15717 drm_dbg_kms(&dev_priv->drm, 15718 "Sanitizing transcoder select for HDMI %c\n", 15719 port_name(port)); 15720 15721 val &= ~SDVO_PIPE_SEL_MASK; 15722 val |= SDVO_PIPE_SEL(PIPE_A); 15723 15724 intel_de_write(dev_priv, hdmi_reg, val); 15725 } 15726 15727 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv, 15728 enum port port, i915_reg_t dp_reg) 15729 { 15730 u32 val = intel_de_read(dev_priv, dp_reg); 15731 15732 if (val & DP_PORT_EN || 15733 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A)) 15734 return; 15735 15736 drm_dbg_kms(&dev_priv->drm, 15737 "Sanitizing transcoder select for DP %c\n", 15738 port_name(port)); 15739 15740 val &= ~DP_PIPE_SEL_MASK; 15741 val |= DP_PIPE_SEL(PIPE_A); 15742 15743 intel_de_write(dev_priv, dp_reg, val); 15744 } 15745 15746 static void 
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Hold the INIT power domain for the whole readout + sanitize pass. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and on pre-skl, sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		/* Active CRTCs should hold no leftover power domain refs here. */
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}

/*
 * Restore the display state saved at suspend time
 * (dev_priv->modeset_restore_state), taking all modeset locks with
 * deadlock backoff. Consumes (puts) the saved atomic state and logs an
 * error if the restore fails.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* Take ownership of the saved state; it is put at the end. */
	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Standard w/w locking dance: back off and retry on -EDEADLK. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}

/*
 * Cancel all outstanding per-connector work (modeset retry and HDCP
 * check/prop work) that may have been queued by hotplug handling.
 */
static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Only cancel work that was ever initialized (func set). */
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Drain queued flips/modesets before the irq handlers go away. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}

/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* Workqueues were flushed in part #1; safe to destroy now. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}

/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_csr_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

/*
 * Snapshot of display registers taken at GPU error time, printed into
 * the error state file by intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;		/* HSW_PWR_WELL_CTL2 (HSW/BDW only) */

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* false => register reads below were skipped */
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		/* true only if the platform has this transcoder */
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];	/* sized to match transcoders[] below */
};

/*
 * Capture the current display hardware state for error reporting.
 * May be called from atomic context (GFP_ATOMIC allocation). Returns
 * NULL when there is no display or the allocation fails; caller owns
 * the returned buffer.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	/* Keep the snapshot array in sync with the transcoder list. */
	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv))
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = intel_de_read(dev_priv,
							 HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		/* Don't touch registers of a powered-down pipe. */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
		error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
		error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));

		error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
		error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
		/* DSPSIZE/DSPPOS only exist on gen2/3 */
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = intel_de_read(dev_priv,
							     DSPSIZE(i));
			error->plane[i].pos = intel_de_read(dev_priv,
							    DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = intel_de_read(dev_priv,
							     DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = intel_de_read(dev_priv,
								DSPSURF(i));
			error->plane[i].tile_offset = intel_de_read(dev_priv,
								    DSPTILEOFF(i));
		}

		error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = intel_de_read(dev_priv,
							    PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = intel_de_read(dev_priv,
							  PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = intel_de_read(dev_priv,
							    HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = intel_de_read(dev_priv,
							    HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = intel_de_read(dev_priv,
							   HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = intel_de_read(dev_priv,
							    VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = intel_de_read(dev_priv,
							    VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = intel_de_read(dev_priv,
							   VSYNC(cpu_transcoder));
	}

	return error;
}

/* Shorthand for printing into the error state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Pretty-print a previously captured display error state into the
 * error state buffer. A NULL @error is a no-op. Only entries whose
 * power domain was on at capture time carry valid register values.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* Mirror the gen gating used at capture time. */
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}

#endif