/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"

#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
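
/*
 * Worked example for the divider formula above (illustrative numbers,
 * not from any SKU table): with an HPLL VCO of 1600 MHz
 * (ref_freq = 1600000 kHz) and a CCK divider field of 9, the result is
 * DIV_ROUND_CLOSEST(1600000 * 2, 9 + 1) = 320000 kHz, i.e. the divider
 * encodes steps of half the reference clock.
 */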

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
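
/*
 * Note on the p2 entries (an explanatory aside, not from bspec): per
 * i9xx_select_p2_div() below, non-LVDS outputs use p2_slow when the
 * target dot clock is below dot_limit and p2_fast at or above it.
 * E.g. with intel_limits_i9xx_sdvo a 150 MHz target selects p2 = 10,
 * while a 250 MHz target selects p2 = 5.
 */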

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
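/*
 * For example (illustrative reading of the comment above): the
 * .m1 = { .min = 12, .max = 22 } entries below correspond to actual M1
 * divider values of 14..24, since the clock equations add 2 back to
 * each register-encoded field.
 */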
392 */ 393 static const struct intel_limit intel_limits_ironlake_dac = { 394 .dot = { .min = 25000, .max = 350000 }, 395 .vco = { .min = 1760000, .max = 3510000 }, 396 .n = { .min = 1, .max = 5 }, 397 .m = { .min = 79, .max = 127 }, 398 .m1 = { .min = 12, .max = 22 }, 399 .m2 = { .min = 5, .max = 9 }, 400 .p = { .min = 5, .max = 80 }, 401 .p1 = { .min = 1, .max = 8 }, 402 .p2 = { .dot_limit = 225000, 403 .p2_slow = 10, .p2_fast = 5 }, 404 }; 405 406 static const struct intel_limit intel_limits_ironlake_single_lvds = { 407 .dot = { .min = 25000, .max = 350000 }, 408 .vco = { .min = 1760000, .max = 3510000 }, 409 .n = { .min = 1, .max = 3 }, 410 .m = { .min = 79, .max = 118 }, 411 .m1 = { .min = 12, .max = 22 }, 412 .m2 = { .min = 5, .max = 9 }, 413 .p = { .min = 28, .max = 112 }, 414 .p1 = { .min = 2, .max = 8 }, 415 .p2 = { .dot_limit = 225000, 416 .p2_slow = 14, .p2_fast = 14 }, 417 }; 418 419 static const struct intel_limit intel_limits_ironlake_dual_lvds = { 420 .dot = { .min = 25000, .max = 350000 }, 421 .vco = { .min = 1760000, .max = 3510000 }, 422 .n = { .min = 1, .max = 3 }, 423 .m = { .min = 79, .max = 127 }, 424 .m1 = { .min = 12, .max = 22 }, 425 .m2 = { .min = 5, .max = 9 }, 426 .p = { .min = 14, .max = 56 }, 427 .p1 = { .min = 2, .max = 8 }, 428 .p2 = { .dot_limit = 225000, 429 .p2_slow = 7, .p2_fast = 7 }, 430 }; 431 432 /* LVDS 100mhz refclk limits. */ 433 static const struct intel_limit intel_limits_ironlake_single_lvds_100m = { 434 .dot = { .min = 25000, .max = 350000 }, 435 .vco = { .min = 1760000, .max = 3510000 }, 436 .n = { .min = 1, .max = 2 }, 437 .m = { .min = 79, .max = 126 }, 438 .m1 = { .min = 12, .max = 22 }, 439 .m2 = { .min = 5, .max = 9 }, 440 .p = { .min = 28, .max = 112 }, 441 .p1 = { .min = 2, .max = 8 }, 442 .p2 = { .dot_limit = 225000, 443 .p2_slow = 14, .p2_fast = 14 }, 444 }; 445 446 static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = { 447 .dot = { .min = 25000, .max = 350000 }, 448 .vco = { .min = 1760000, .max = 3510000 }, 449 .n = { .min = 1, .max = 3 }, 450 .m = { .min = 79, .max = 126 }, 451 .m1 = { .min = 12, .max = 22 }, 452 .m2 = { .min = 5, .max = 9 }, 453 .p = { .min = 14, .max = 42 }, 454 .p1 = { .min = 2, .max = 6 }, 455 .p2 = { .dot_limit = 225000, 456 .p2_slow = 7, .p2_fast = 7 }, 457 }; 458 459 static const struct intel_limit intel_limits_vlv = { 460 /* 461 * These are the data rate limits (measured in fast clocks) 462 * since those are the strictest limits we have. The fast 463 * clock and actual rate limits are more relaxed, so checking 464 * them would make no difference. 465 */ 466 .dot = { .min = 25000 * 5, .max = 270000 * 5 }, 467 .vco = { .min = 4000000, .max = 6000000 }, 468 .n = { .min = 1, .max = 7 }, 469 .m1 = { .min = 2, .max = 3 }, 470 .m2 = { .min = 11, .max = 156 }, 471 .p1 = { .min = 2, .max = 3 }, 472 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ 473 }; 474 475 static const struct intel_limit intel_limits_chv = { 476 /* 477 * These are the data rate limits (measured in fast clocks) 478 * since those are the strictest limits we have. The fast 479 * clock and actual rate limits are more relaxed, so checking 480 * them would make no difference. 
481 */ 482 .dot = { .min = 25000 * 5, .max = 540000 * 5}, 483 .vco = { .min = 4800000, .max = 6480000 }, 484 .n = { .min = 1, .max = 1 }, 485 .m1 = { .min = 2, .max = 2 }, 486 .m2 = { .min = 24 << 22, .max = 175 << 22 }, 487 .p1 = { .min = 2, .max = 4 }, 488 .p2 = { .p2_slow = 1, .p2_fast = 14 }, 489 }; 490 491 static const struct intel_limit intel_limits_bxt = { 492 /* FIXME: find real dot limits */ 493 .dot = { .min = 0, .max = INT_MAX }, 494 .vco = { .min = 4800000, .max = 6700000 }, 495 .n = { .min = 1, .max = 1 }, 496 .m1 = { .min = 2, .max = 2 }, 497 /* FIXME: find real m2 limits */ 498 .m2 = { .min = 2 << 22, .max = 255 << 22 }, 499 .p1 = { .min = 2, .max = 4 }, 500 .p2 = { .p2_slow = 1, .p2_fast = 20 }, 501 }; 502 503 /* WA Display #0827: Gen9:all */ 504 static void 505 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) 506 { 507 if (enable) 508 I915_WRITE(CLKGATE_DIS_PSL(pipe), 509 I915_READ(CLKGATE_DIS_PSL(pipe)) | 510 DUPS1_GATING_DIS | DUPS2_GATING_DIS); 511 else 512 I915_WRITE(CLKGATE_DIS_PSL(pipe), 513 I915_READ(CLKGATE_DIS_PSL(pipe)) & 514 ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS)); 515 } 516 517 /* Wa_2006604312:icl */ 518 static void 519 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 520 bool enable) 521 { 522 if (enable) 523 I915_WRITE(CLKGATE_DIS_PSL(pipe), 524 I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS); 525 else 526 I915_WRITE(CLKGATE_DIS_PSL(pipe), 527 I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS); 528 } 529 530 static bool 531 needs_modeset(const struct intel_crtc_state *state) 532 { 533 return drm_atomic_crtc_needs_modeset(&state->base); 534 } 535 536 bool 537 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) 538 { 539 return (crtc_state->master_transcoder != INVALID_TRANSCODER || 540 crtc_state->sync_mode_slaves_mask); 541 } 542 543 static bool 544 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state) 545 { 546 return (crtc_state->master_transcoder == INVALID_TRANSCODER && 547 crtc_state->sync_mode_slaves_mask); 548 } 549 550 /* 551 * Platform specific helpers to calculate the port PLL loopback- (clock.m), 552 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast 553 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic. 554 * The helpers' return value is the rate of the clock that is fed to the 555 * display engine's pipe which can be the above fast dot clock rate or a 556 * divided-down version of it. 
557 */ 558 /* m1 is reserved as 0 in Pineview, n is a ring counter */ 559 static int pnv_calc_dpll_params(int refclk, struct dpll *clock) 560 { 561 clock->m = clock->m2 + 2; 562 clock->p = clock->p1 * clock->p2; 563 if (WARN_ON(clock->n == 0 || clock->p == 0)) 564 return 0; 565 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 566 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 567 568 return clock->dot; 569 } 570 571 static u32 i9xx_dpll_compute_m(struct dpll *dpll) 572 { 573 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); 574 } 575 576 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock) 577 { 578 clock->m = i9xx_dpll_compute_m(clock); 579 clock->p = clock->p1 * clock->p2; 580 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) 581 return 0; 582 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); 583 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 584 585 return clock->dot; 586 } 587 588 static int vlv_calc_dpll_params(int refclk, struct dpll *clock) 589 { 590 clock->m = clock->m1 * clock->m2; 591 clock->p = clock->p1 * clock->p2; 592 if (WARN_ON(clock->n == 0 || clock->p == 0)) 593 return 0; 594 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 595 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 596 597 return clock->dot / 5; 598 } 599 600 int chv_calc_dpll_params(int refclk, struct dpll *clock) 601 { 602 clock->m = clock->m1 * clock->m2; 603 clock->p = clock->p1 * clock->p2; 604 if (WARN_ON(clock->n == 0 || clock->p == 0)) 605 return 0; 606 clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), 607 clock->n << 22); 608 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 609 610 return clock->dot / 5; 611 } 612 613 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 614 615 /* 616 * Returns whether the given set of divisors are valid for a given refclk with 617 * the given connectors. 618 */ 619 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv, 620 const struct intel_limit *limit, 621 const struct dpll *clock) 622 { 623 if (clock->n < limit->n.min || limit->n.max < clock->n) 624 INTELPllInvalid("n out of range\n"); 625 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 626 INTELPllInvalid("p1 out of range\n"); 627 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 628 INTELPllInvalid("m2 out of range\n"); 629 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 630 INTELPllInvalid("m1 out of range\n"); 631 632 if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) && 633 !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv)) 634 if (clock->m1 <= clock->m2) 635 INTELPllInvalid("m1 <= m2\n"); 636 637 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 638 !IS_GEN9_LP(dev_priv)) { 639 if (clock->p < limit->p.min || limit->p.max < clock->p) 640 INTELPllInvalid("p out of range\n"); 641 if (clock->m < limit->m.min || limit->m.max < clock->m) 642 INTELPllInvalid("m out of range\n"); 643 } 644 645 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 646 INTELPllInvalid("vco out of range\n"); 647 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 648 * connector, etc., rather than just a single range. 
649 */ 650 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 651 INTELPllInvalid("dot out of range\n"); 652 653 return true; 654 } 655 656 static int 657 i9xx_select_p2_div(const struct intel_limit *limit, 658 const struct intel_crtc_state *crtc_state, 659 int target) 660 { 661 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 662 663 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 664 /* 665 * For LVDS just rely on its current settings for dual-channel. 666 * We haven't figured out how to reliably set up different 667 * single/dual channel state, if we even can. 668 */ 669 if (intel_is_dual_link_lvds(dev_priv)) 670 return limit->p2.p2_fast; 671 else 672 return limit->p2.p2_slow; 673 } else { 674 if (target < limit->p2.dot_limit) 675 return limit->p2.p2_slow; 676 else 677 return limit->p2.p2_fast; 678 } 679 } 680 681 /* 682 * Returns a set of divisors for the desired target clock with the given 683 * refclk, or FALSE. The returned values represent the clock equation: 684 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 685 * 686 * Target and reference clocks are specified in kHz. 687 * 688 * If match_clock is provided, then best_clock P divider must match the P 689 * divider from @match_clock used for LVDS downclocking. 690 */ 691 static bool 692 i9xx_find_best_dpll(const struct intel_limit *limit, 693 struct intel_crtc_state *crtc_state, 694 int target, int refclk, struct dpll *match_clock, 695 struct dpll *best_clock) 696 { 697 struct drm_device *dev = crtc_state->base.crtc->dev; 698 struct dpll clock; 699 int err = target; 700 701 memset(best_clock, 0, sizeof(*best_clock)); 702 703 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 704 705 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 706 clock.m1++) { 707 for (clock.m2 = limit->m2.min; 708 clock.m2 <= limit->m2.max; clock.m2++) { 709 if (clock.m2 >= clock.m1) 710 break; 711 for (clock.n = limit->n.min; 712 clock.n <= limit->n.max; clock.n++) { 713 for (clock.p1 = limit->p1.min; 714 clock.p1 <= limit->p1.max; clock.p1++) { 715 int this_err; 716 717 i9xx_calc_dpll_params(refclk, &clock); 718 if (!intel_PLL_is_valid(to_i915(dev), 719 limit, 720 &clock)) 721 continue; 722 if (match_clock && 723 clock.p != match_clock->p) 724 continue; 725 726 this_err = abs(clock.dot - target); 727 if (this_err < err) { 728 *best_clock = clock; 729 err = this_err; 730 } 731 } 732 } 733 } 734 } 735 736 return (err != target); 737 } 738 739 /* 740 * Returns a set of divisors for the desired target clock with the given 741 * refclk, or FALSE. The returned values represent the clock equation: 742 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 743 * 744 * Target and reference clocks are specified in kHz. 745 * 746 * If match_clock is provided, then best_clock P divider must match the P 747 * divider from @match_clock used for LVDS downclocking. 
748 */ 749 static bool 750 pnv_find_best_dpll(const struct intel_limit *limit, 751 struct intel_crtc_state *crtc_state, 752 int target, int refclk, struct dpll *match_clock, 753 struct dpll *best_clock) 754 { 755 struct drm_device *dev = crtc_state->base.crtc->dev; 756 struct dpll clock; 757 int err = target; 758 759 memset(best_clock, 0, sizeof(*best_clock)); 760 761 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 762 763 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 764 clock.m1++) { 765 for (clock.m2 = limit->m2.min; 766 clock.m2 <= limit->m2.max; clock.m2++) { 767 for (clock.n = limit->n.min; 768 clock.n <= limit->n.max; clock.n++) { 769 for (clock.p1 = limit->p1.min; 770 clock.p1 <= limit->p1.max; clock.p1++) { 771 int this_err; 772 773 pnv_calc_dpll_params(refclk, &clock); 774 if (!intel_PLL_is_valid(to_i915(dev), 775 limit, 776 &clock)) 777 continue; 778 if (match_clock && 779 clock.p != match_clock->p) 780 continue; 781 782 this_err = abs(clock.dot - target); 783 if (this_err < err) { 784 *best_clock = clock; 785 err = this_err; 786 } 787 } 788 } 789 } 790 } 791 792 return (err != target); 793 } 794 795 /* 796 * Returns a set of divisors for the desired target clock with the given 797 * refclk, or FALSE. The returned values represent the clock equation: 798 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 799 * 800 * Target and reference clocks are specified in kHz. 801 * 802 * If match_clock is provided, then best_clock P divider must match the P 803 * divider from @match_clock used for LVDS downclocking. 804 */ 805 static bool 806 g4x_find_best_dpll(const struct intel_limit *limit, 807 struct intel_crtc_state *crtc_state, 808 int target, int refclk, struct dpll *match_clock, 809 struct dpll *best_clock) 810 { 811 struct drm_device *dev = crtc_state->base.crtc->dev; 812 struct dpll clock; 813 int max_n; 814 bool found = false; 815 /* approximately equals target * 0.00585 */ 816 int err_most = (target >> 8) + (target >> 9); 817 818 memset(best_clock, 0, sizeof(*best_clock)); 819 820 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 821 822 max_n = limit->n.max; 823 /* based on hardware requirement, prefer smaller n to precision */ 824 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 825 /* based on hardware requirement, prefere larger m1,m2 */ 826 for (clock.m1 = limit->m1.max; 827 clock.m1 >= limit->m1.min; clock.m1--) { 828 for (clock.m2 = limit->m2.max; 829 clock.m2 >= limit->m2.min; clock.m2--) { 830 for (clock.p1 = limit->p1.max; 831 clock.p1 >= limit->p1.min; clock.p1--) { 832 int this_err; 833 834 i9xx_calc_dpll_params(refclk, &clock); 835 if (!intel_PLL_is_valid(to_i915(dev), 836 limit, 837 &clock)) 838 continue; 839 840 this_err = abs(clock.dot - target); 841 if (this_err < err_most) { 842 *best_clock = clock; 843 err_most = this_err; 844 max_n = clock.n; 845 found = true; 846 } 847 } 848 } 849 } 850 } 851 return found; 852 } 853 854 /* 855 * Check if the calculated PLL configuration is more optimal compared to the 856 * best configuration and error found so far. Return the calculated error. 857 */ 858 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, 859 const struct dpll *calculated_clock, 860 const struct dpll *best_clock, 861 unsigned int best_error_ppm, 862 unsigned int *error_ppm) 863 { 864 /* 865 * For CHV ignore the error and consider only the P value. 866 * Prefer a bigger P value based on HW requirements. 
867 */ 868 if (IS_CHERRYVIEW(to_i915(dev))) { 869 *error_ppm = 0; 870 871 return calculated_clock->p > best_clock->p; 872 } 873 874 if (WARN_ON_ONCE(!target_freq)) 875 return false; 876 877 *error_ppm = div_u64(1000000ULL * 878 abs(target_freq - calculated_clock->dot), 879 target_freq); 880 /* 881 * Prefer a better P value over a better (smaller) error if the error 882 * is small. Ensure this preference for future configurations too by 883 * setting the error to 0. 884 */ 885 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) { 886 *error_ppm = 0; 887 888 return true; 889 } 890 891 return *error_ppm + 10 < best_error_ppm; 892 } 893 894 /* 895 * Returns a set of divisors for the desired target clock with the given 896 * refclk, or FALSE. The returned values represent the clock equation: 897 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 898 */ 899 static bool 900 vlv_find_best_dpll(const struct intel_limit *limit, 901 struct intel_crtc_state *crtc_state, 902 int target, int refclk, struct dpll *match_clock, 903 struct dpll *best_clock) 904 { 905 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 906 struct drm_device *dev = crtc->base.dev; 907 struct dpll clock; 908 unsigned int bestppm = 1000000; 909 /* min update 19.2 MHz */ 910 int max_n = min(limit->n.max, refclk / 19200); 911 bool found = false; 912 913 target *= 5; /* fast clock */ 914 915 memset(best_clock, 0, sizeof(*best_clock)); 916 917 /* based on hardware requirement, prefer smaller n to precision */ 918 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 919 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 920 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; 921 clock.p2 -= clock.p2 > 10 ? 2 : 1) { 922 clock.p = clock.p1 * clock.p2; 923 /* based on hardware requirement, prefer bigger m1,m2 values */ 924 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 925 unsigned int ppm; 926 927 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, 928 refclk * clock.m1); 929 930 vlv_calc_dpll_params(refclk, &clock); 931 932 if (!intel_PLL_is_valid(to_i915(dev), 933 limit, 934 &clock)) 935 continue; 936 937 if (!vlv_PLL_is_optimal(dev, target, 938 &clock, 939 best_clock, 940 bestppm, &ppm)) 941 continue; 942 943 *best_clock = clock; 944 bestppm = ppm; 945 found = true; 946 } 947 } 948 } 949 } 950 951 return found; 952 } 953 954 /* 955 * Returns a set of divisors for the desired target clock with the given 956 * refclk, or FALSE. The returned values represent the clock equation: 957 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 958 */ 959 static bool 960 chv_find_best_dpll(const struct intel_limit *limit, 961 struct intel_crtc_state *crtc_state, 962 int target, int refclk, struct dpll *match_clock, 963 struct dpll *best_clock) 964 { 965 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 966 struct drm_device *dev = crtc->base.dev; 967 unsigned int best_error_ppm; 968 struct dpll clock; 969 u64 m2; 970 int found = false; 971 972 memset(best_clock, 0, sizeof(*best_clock)); 973 best_error_ppm = 1000000; 974 975 /* 976 * Based on hardware doc, the n always set to 1, and m1 always 977 * set to 2. If requires to support 200Mhz refclk, we need to 978 * revisit this because n may not 1 anymore. 

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware docs, n is always set to 1 and m1 is always
	 * set to 2. If we ever need to support a 200 MHz refclk, we will
	 * have to revisit this because n may no longer be 1.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5; /* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}
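
/*
 * BXT reuses the CHV search above with a fixed 100 MHz reference clock
 * and the placeholder limits in intel_limits_bxt.
 */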

bool intel_crtc_active(struct intel_crtc *crtc)
{
	/*
	 * Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/*
		 * The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
		     dport->base.base.base.id, dport->base.base.name,
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
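
/*
 * Note on the expected_mask shift above (inferred from the mask
 * definitions, not from a spec reference): port C's ready bits sit four
 * bits above port B's in DPLL(0), so the caller-supplied mask is shifted
 * to line up with DPLL_PORTC_READY_MASK.
 */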

static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/*
		 * Workaround: Set the timing override bit before enabling
		 * the pch transcoder.
		 */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_PCH_LPT(dev_priv))
		return PIPE_A;
	else
		return crtc->pipe;
}

static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	/*
	 * On i965gm the hardware frame counter reads
	 * zero when the TV encoder is enabled :(
	 */
	if (IS_I965GM(dev_priv) &&
	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
		return 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return 0xffffffff; /* full 32 bit counter */
	else if (INTEL_GEN(dev_priv) >= 3)
		return 0xffffff; /* only 24 bits of frame count */
	else
		return 0; /* Gen2 doesn't have a hardware frame counter */
}

static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}
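
/*
 * When intel_crtc_max_vblank_count() returns 0 (gen2, or i965gm with the
 * TV encoder active), the vblank core is effectively told there is no
 * usable hardware frame counter; intel_enable_pipe() below then waits for
 * the scanline to start moving so the timestamp-derived vblank counts
 * begin from a sane state.
 */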
1836 */ 1837 if (HAS_GMCH(dev_priv)) { 1838 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI)) 1839 assert_dsi_pll_enabled(dev_priv); 1840 else 1841 assert_pll_enabled(dev_priv, pipe); 1842 } else { 1843 if (new_crtc_state->has_pch_encoder) { 1844 /* if driving the PCH, we need FDI enabled */ 1845 assert_fdi_rx_pll_enabled(dev_priv, 1846 intel_crtc_pch_transcoder(crtc)); 1847 assert_fdi_tx_pll_enabled(dev_priv, 1848 (enum pipe) cpu_transcoder); 1849 } 1850 /* FIXME: assert CPU port conditions for SNB+ */ 1851 } 1852 1853 trace_intel_pipe_enable(crtc); 1854 1855 reg = PIPECONF(cpu_transcoder); 1856 val = I915_READ(reg); 1857 if (val & PIPECONF_ENABLE) { 1858 /* we keep both pipes enabled on 830 */ 1859 WARN_ON(!IS_I830(dev_priv)); 1860 return; 1861 } 1862 1863 I915_WRITE(reg, val | PIPECONF_ENABLE); 1864 POSTING_READ(reg); 1865 1866 /* 1867 * Until the pipe starts PIPEDSL reads will return a stale value, 1868 * which causes an apparent vblank timestamp jump when PIPEDSL 1869 * resets to its proper value. That also messes up the frame count 1870 * when it's derived from the timestamps. So let's wait for the 1871 * pipe to start properly before we call drm_crtc_vblank_on(). 1872 */ 1873 if (intel_crtc_max_vblank_count(new_crtc_state) == 0) 1874 intel_wait_for_pipe_scanline_moving(crtc); 1875 } 1876 1877 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) 1878 { 1879 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 1880 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1881 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 1882 enum pipe pipe = crtc->pipe; 1883 i915_reg_t reg; 1884 u32 val; 1885 1886 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 1887 1888 /* 1889 * Make sure planes won't keep trying to pump pixels to us, 1890 * or we might hang the display. 1891 */ 1892 assert_planes_disabled(crtc); 1893 1894 trace_intel_pipe_disable(crtc); 1895 1896 reg = PIPECONF(cpu_transcoder); 1897 val = I915_READ(reg); 1898 if ((val & PIPECONF_ENABLE) == 0) 1899 return; 1900 1901 /* 1902 * Double wide has implications for planes 1903 * so best keep it disabled when not needed. 1904 */ 1905 if (old_crtc_state->double_wide) 1906 val &= ~PIPECONF_DOUBLE_WIDE; 1907 1908 /* Don't disable the pipe or pipe PLLs while they're still needed (i830 keeps both pipes running) */ 1909 if (!IS_I830(dev_priv)) 1910 val &= ~PIPECONF_ENABLE; 1911 1912 I915_WRITE(reg, val); 1913 if ((val & PIPECONF_ENABLE) == 0) 1914 intel_wait_for_pipe_off(old_crtc_state); 1915 } 1916 1917 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 1918 { 1919 return IS_GEN(dev_priv, 2) ?
2048 : 4096; 1920 } 1921 1922 static unsigned int 1923 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) 1924 { 1925 struct drm_i915_private *dev_priv = to_i915(fb->dev); 1926 unsigned int cpp = fb->format->cpp[color_plane]; 1927 1928 switch (fb->modifier) { 1929 case DRM_FORMAT_MOD_LINEAR: 1930 return intel_tile_size(dev_priv); 1931 case I915_FORMAT_MOD_X_TILED: 1932 if (IS_GEN(dev_priv, 2)) 1933 return 128; 1934 else 1935 return 512; 1936 case I915_FORMAT_MOD_Y_TILED_CCS: 1937 if (color_plane == 1) 1938 return 128; 1939 /* fall through */ 1940 case I915_FORMAT_MOD_Y_TILED: 1941 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv)) 1942 return 128; 1943 else 1944 return 512; 1945 case I915_FORMAT_MOD_Yf_TILED_CCS: 1946 if (color_plane == 1) 1947 return 128; 1948 /* fall through */ 1949 case I915_FORMAT_MOD_Yf_TILED: 1950 switch (cpp) { 1951 case 1: 1952 return 64; 1953 case 2: 1954 case 4: 1955 return 128; 1956 case 8: 1957 case 16: 1958 return 256; 1959 default: 1960 MISSING_CASE(cpp); 1961 return cpp; 1962 } 1963 break; 1964 default: 1965 MISSING_CASE(fb->modifier); 1966 return cpp; 1967 } 1968 } 1969 1970 static unsigned int 1971 intel_tile_height(const struct drm_framebuffer *fb, int color_plane) 1972 { 1973 return intel_tile_size(to_i915(fb->dev)) / 1974 intel_tile_width_bytes(fb, color_plane); 1975 } 1976 1977 /* Return the tile dimensions in pixel units */ 1978 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane, 1979 unsigned int *tile_width, 1980 unsigned int *tile_height) 1981 { 1982 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane); 1983 unsigned int cpp = fb->format->cpp[color_plane]; 1984 1985 *tile_width = tile_width_bytes / cpp; 1986 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes; 1987 } 1988 1989 unsigned int 1990 intel_fb_align_height(const struct drm_framebuffer *fb, 1991 int color_plane, unsigned int height) 1992 { 1993 unsigned int tile_height = intel_tile_height(fb, color_plane); 1994 1995 return ALIGN(height, tile_height); 1996 } 1997 1998 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 1999 { 2000 unsigned int size = 0; 2001 int i; 2002 2003 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) 2004 size += rot_info->plane[i].width * rot_info->plane[i].height; 2005 2006 return size; 2007 } 2008 2009 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info) 2010 { 2011 unsigned int size = 0; 2012 int i; 2013 2014 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) 2015 size += rem_info->plane[i].width * rem_info->plane[i].height; 2016 2017 return size; 2018 } 2019 2020 static void 2021 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, 2022 const struct drm_framebuffer *fb, 2023 unsigned int rotation) 2024 { 2025 view->type = I915_GGTT_VIEW_NORMAL; 2026 if (drm_rotation_90_or_270(rotation)) { 2027 view->type = I915_GGTT_VIEW_ROTATED; 2028 view->rotated = to_intel_framebuffer(fb)->rot_info; 2029 } 2030 } 2031 2032 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv) 2033 { 2034 if (IS_I830(dev_priv)) 2035 return 16 * 1024; 2036 else if (IS_I85X(dev_priv)) 2037 return 256; 2038 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 2039 return 32; 2040 else 2041 return 4 * 1024; 2042 } 2043 2044 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) 2045 { 2046 if (INTEL_GEN(dev_priv) >= 9) 2047 return 256 * 1024; 2048 else if (IS_I965G(dev_priv) || 
IS_I965GM(dev_priv) || 2049 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2050 return 128 * 1024; 2051 else if (INTEL_GEN(dev_priv) >= 4) 2052 return 4 * 1024; 2053 else 2054 return 0; 2055 } 2056 2057 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, 2058 int color_plane) 2059 { 2060 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2061 2062 /* AUX_DIST needs only 4K alignment */ 2063 if (color_plane == 1) 2064 return 4096; 2065 2066 switch (fb->modifier) { 2067 case DRM_FORMAT_MOD_LINEAR: 2068 return intel_linear_alignment(dev_priv); 2069 case I915_FORMAT_MOD_X_TILED: 2070 if (INTEL_GEN(dev_priv) >= 9) 2071 return 256 * 1024; 2072 return 0; 2073 case I915_FORMAT_MOD_Y_TILED_CCS: 2074 case I915_FORMAT_MOD_Yf_TILED_CCS: 2075 case I915_FORMAT_MOD_Y_TILED: 2076 case I915_FORMAT_MOD_Yf_TILED: 2077 return 1 * 1024 * 1024; 2078 default: 2079 MISSING_CASE(fb->modifier); 2080 return 0; 2081 } 2082 } 2083 2084 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) 2085 { 2086 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2087 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2088 2089 return INTEL_GEN(dev_priv) < 4 || 2090 (plane->has_fbc && 2091 plane_state->view.type == I915_GGTT_VIEW_NORMAL); 2092 } 2093 2094 struct i915_vma * 2095 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 2096 const struct i915_ggtt_view *view, 2097 bool uses_fence, 2098 unsigned long *out_flags) 2099 { 2100 struct drm_device *dev = fb->dev; 2101 struct drm_i915_private *dev_priv = to_i915(dev); 2102 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2103 intel_wakeref_t wakeref; 2104 struct i915_vma *vma; 2105 unsigned int pinctl; 2106 u32 alignment; 2107 2108 if (WARN_ON(!i915_gem_object_is_framebuffer(obj))) 2109 return ERR_PTR(-EINVAL); 2110 2111 alignment = intel_surf_alignment(fb, 0); 2112 2113 /* Note that the w/a also requires 64 PTE of padding following the 2114 * bo. We currently fill all unused PTE with the shadow page and so 2115 * we should always have valid PTE following the scanout preventing 2116 * the VT-d warning. 2117 */ 2118 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) 2119 alignment = 256 * 1024; 2120 2121 /* 2122 * Global gtt pte registers are special registers which actually forward 2123 * writes to a chunk of system memory, which means that there is no risk 2124 * that the register values disappear as soon as we call 2125 * intel_runtime_pm_put(), so it is correct to wrap only the 2126 * pin/unpin/fence and not more. 2127 */ 2128 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 2129 i915_gem_object_lock(obj); 2130 2131 atomic_inc(&dev_priv->gpu_error.pending_fb_pin); 2132 2133 pinctl = 0; 2134 2135 /* Valleyview is definitely limited to scanning out the first 2136 * 512MiB. Let's presume this behaviour was inherited from the 2137 * g4x display engine and that all earlier gen are similarly 2138 * limited. Testing suggests that it is a little more 2139 * complicated than this. For example, Cherryview appears quite 2140 * happy to scanout from anywhere within its global aperture. 2141 */ 2142 if (HAS_GMCH(dev_priv)) 2143 pinctl |= PIN_MAPPABLE; 2144 2145 vma = i915_gem_object_pin_to_display_plane(obj, 2146 alignment, view, pinctl); 2147 if (IS_ERR(vma)) 2148 goto err; 2149 2150 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) { 2151 int ret; 2152 2153 /* Install a fence for tiled scan-out.
Pre-i965 always needs a 2154 * fence, whereas 965+ only requires a fence if using 2155 * framebuffer compression. For simplicity, we always install a fence 2156 * when possible, as the cost is not that onerous. 2157 * 2158 * If we fail to fence the tiled scanout, then either the 2159 * modeset will reject the change (which is highly unlikely as 2160 * the affected systems, all but one, do not have unmappable 2161 * space) or we will not be able to enable full powersaving 2162 * techniques (also likely not to apply due to various limits 2163 * FBC and the like impose on the size of the buffer, which 2164 * presumably we violated anyway with this unmappable buffer). 2165 * Anyway, it is presumably better to stumble onwards with 2166 * something and try to run the system in a "less than optimal" 2167 * mode that matches the user configuration. 2168 */ 2169 ret = i915_vma_pin_fence(vma); 2170 if (ret != 0 && INTEL_GEN(dev_priv) < 4) { 2171 i915_gem_object_unpin_from_display_plane(vma); 2172 vma = ERR_PTR(ret); 2173 goto err; 2174 } 2175 2176 if (ret == 0 && vma->fence) 2177 *out_flags |= PLANE_HAS_FENCE; 2178 } 2179 2180 i915_vma_get(vma); 2181 err: 2182 atomic_dec(&dev_priv->gpu_error.pending_fb_pin); 2183 2184 i915_gem_object_unlock(obj); 2185 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2186 return vma; 2187 } 2188 2189 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) 2190 { 2191 i915_gem_object_lock(vma->obj); 2192 if (flags & PLANE_HAS_FENCE) 2193 i915_vma_unpin_fence(vma); 2194 i915_gem_object_unpin_from_display_plane(vma); 2195 i915_gem_object_unlock(vma->obj); 2196 2197 i915_vma_put(vma); 2198 } 2199 2200 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane, 2201 unsigned int rotation) 2202 { 2203 if (drm_rotation_90_or_270(rotation)) 2204 return to_intel_framebuffer(fb)->rotated[color_plane].pitch; 2205 else 2206 return fb->pitches[color_plane]; 2207 } 2208 2209 /* 2210 * Convert the x/y offsets into a linear offset. 2211 * Only valid with 0/180 degree rotation, which is fine since linear 2212 * offset is only used with linear buffers on pre-hsw and tiled buffers 2213 * with gen2/3, and 90/270 degree rotation isn't supported on any of them. 2214 */ 2215 u32 intel_fb_xy_to_linear(int x, int y, 2216 const struct intel_plane_state *state, 2217 int color_plane) 2218 { 2219 const struct drm_framebuffer *fb = state->base.fb; 2220 unsigned int cpp = fb->format->cpp[color_plane]; 2221 unsigned int pitch = state->color_plane[color_plane].stride; 2222 2223 return y * pitch + x * cpp; 2224 } 2225 2226 /* 2227 * Add the x/y offsets derived from fb->offsets[] to the user 2228 * specified plane src x/y offsets. The resulting x/y offsets 2229 * specify the start of scanout from the beginning of the gtt mapping.
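* * For example, with color_plane[0].x/y = (2, 8) and a plane src origin of (10, 20), scanout starts at (12, 28) into the GTT mapping.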
2230 */ 2231 void intel_add_fb_offsets(int *x, int *y, 2232 const struct intel_plane_state *state, 2233 int color_plane) 2234 2235 { 2236 *x += state->color_plane[color_plane].x; 2237 *y += state->color_plane[color_plane].y; 2238 } 2239 2240 static u32 intel_adjust_tile_offset(int *x, int *y, 2241 unsigned int tile_width, 2242 unsigned int tile_height, 2243 unsigned int tile_size, 2244 unsigned int pitch_tiles, 2245 u32 old_offset, 2246 u32 new_offset) 2247 { 2248 unsigned int pitch_pixels = pitch_tiles * tile_width; 2249 unsigned int tiles; 2250 2251 WARN_ON(old_offset & (tile_size - 1)); 2252 WARN_ON(new_offset & (tile_size - 1)); 2253 WARN_ON(new_offset > old_offset); 2254 2255 tiles = (old_offset - new_offset) / tile_size; 2256 2257 *y += tiles / pitch_tiles * tile_height; 2258 *x += tiles % pitch_tiles * tile_width; 2259 2260 /* minimize x in case it got needlessly big */ 2261 *y += *x / pitch_pixels * tile_height; 2262 *x %= pitch_pixels; 2263 2264 return new_offset; 2265 } 2266 2267 static bool is_surface_linear(u64 modifier, int color_plane) 2268 { 2269 return modifier == DRM_FORMAT_MOD_LINEAR; 2270 } 2271 2272 static u32 intel_adjust_aligned_offset(int *x, int *y, 2273 const struct drm_framebuffer *fb, 2274 int color_plane, 2275 unsigned int rotation, 2276 unsigned int pitch, 2277 u32 old_offset, u32 new_offset) 2278 { 2279 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2280 unsigned int cpp = fb->format->cpp[color_plane]; 2281 2282 WARN_ON(new_offset > old_offset); 2283 2284 if (!is_surface_linear(fb->modifier, color_plane)) { 2285 unsigned int tile_size, tile_width, tile_height; 2286 unsigned int pitch_tiles; 2287 2288 tile_size = intel_tile_size(dev_priv); 2289 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2290 2291 if (drm_rotation_90_or_270(rotation)) { 2292 pitch_tiles = pitch / tile_height; 2293 swap(tile_width, tile_height); 2294 } else { 2295 pitch_tiles = pitch / (tile_width * cpp); 2296 } 2297 2298 intel_adjust_tile_offset(x, y, tile_width, tile_height, 2299 tile_size, pitch_tiles, 2300 old_offset, new_offset); 2301 } else { 2302 old_offset += *y * pitch + *x * cpp; 2303 2304 *y = (old_offset - new_offset) / pitch; 2305 *x = ((old_offset - new_offset) - *y * pitch) / cpp; 2306 } 2307 2308 return new_offset; 2309 } 2310 2311 /* 2312 * Adjust the tile offset by moving the difference into 2313 * the x/y offsets. 2314 */ 2315 static u32 intel_plane_adjust_aligned_offset(int *x, int *y, 2316 const struct intel_plane_state *state, 2317 int color_plane, 2318 u32 old_offset, u32 new_offset) 2319 { 2320 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane, 2321 state->base.rotation, 2322 state->color_plane[color_plane].stride, 2323 old_offset, new_offset); 2324 } 2325 2326 /* 2327 * Computes the aligned offset to the base tile and adjusts 2328 * x, y. bytes per pixel is assumed to be a power-of-two. 2329 * 2330 * In the 90/270 rotated case, x and y are assumed 2331 * to be already rotated to match the rotated GTT view, and 2332 * pitch is the tile_height aligned framebuffer height. 2333 * 2334 * This function is used when computing the derived information 2335 * under intel_framebuffer, so using any of that information 2336 * here is not allowed. Anything under drm_framebuffer can be 2337 * used. This is why the user has to pass in the pitch since it 2338 * is specified in the rotated orientation. 
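* * Worked example (made-up numbers): X-tiled, cpp = 4, 4096 byte tiles laid out as 512 bytes x 8 rows, so tile_width = 128 pixels and tile_height = 8; a pitch of 4096 bytes gives pitch_tiles = 8. For (x, y) = (200, 10) we get tile_rows = 1 and tiles = 1, so the tile base is offset = (1 * 8 + 1) * 4096 = 36864, with the intra-tile remainder (72, 2) left in x/y.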
2339 */ 2340 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, 2341 int *x, int *y, 2342 const struct drm_framebuffer *fb, 2343 int color_plane, 2344 unsigned int pitch, 2345 unsigned int rotation, 2346 u32 alignment) 2347 { 2348 unsigned int cpp = fb->format->cpp[color_plane]; 2349 u32 offset, offset_aligned; 2350 2351 if (alignment) 2352 alignment--; 2353 2354 if (!is_surface_linear(fb->modifier, color_plane)) { 2355 unsigned int tile_size, tile_width, tile_height; 2356 unsigned int tile_rows, tiles, pitch_tiles; 2357 2358 tile_size = intel_tile_size(dev_priv); 2359 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2360 2361 if (drm_rotation_90_or_270(rotation)) { 2362 pitch_tiles = pitch / tile_height; 2363 swap(tile_width, tile_height); 2364 } else { 2365 pitch_tiles = pitch / (tile_width * cpp); 2366 } 2367 2368 tile_rows = *y / tile_height; 2369 *y %= tile_height; 2370 2371 tiles = *x / tile_width; 2372 *x %= tile_width; 2373 2374 offset = (tile_rows * pitch_tiles + tiles) * tile_size; 2375 offset_aligned = offset & ~alignment; 2376 2377 intel_adjust_tile_offset(x, y, tile_width, tile_height, 2378 tile_size, pitch_tiles, 2379 offset, offset_aligned); 2380 } else { 2381 offset = *y * pitch + *x * cpp; 2382 offset_aligned = offset & ~alignment; 2383 2384 *y = (offset & alignment) / pitch; 2385 *x = ((offset & alignment) - *y * pitch) / cpp; 2386 } 2387 2388 return offset_aligned; 2389 } 2390 2391 static u32 intel_plane_compute_aligned_offset(int *x, int *y, 2392 const struct intel_plane_state *state, 2393 int color_plane) 2394 { 2395 struct intel_plane *intel_plane = to_intel_plane(state->base.plane); 2396 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 2397 const struct drm_framebuffer *fb = state->base.fb; 2398 unsigned int rotation = state->base.rotation; 2399 int pitch = state->color_plane[color_plane].stride; 2400 u32 alignment; 2401 2402 if (intel_plane->id == PLANE_CURSOR) 2403 alignment = intel_cursor_alignment(dev_priv); 2404 else 2405 alignment = intel_surf_alignment(fb, color_plane); 2406 2407 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane, 2408 pitch, rotation, alignment); 2409 } 2410 2411 /* Convert the fb->offsets[] into x/y offsets */ 2412 static int intel_fb_offset_to_xy(int *x, int *y, 2413 const struct drm_framebuffer *fb, 2414 int color_plane) 2415 { 2416 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2417 unsigned int height; 2418 2419 if (fb->modifier != DRM_FORMAT_MOD_LINEAR && 2420 fb->offsets[color_plane] % intel_tile_size(dev_priv)) { 2421 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n", 2422 fb->offsets[color_plane], color_plane); 2423 return -EINVAL; 2424 } 2425 2426 height = drm_framebuffer_plane_height(fb->height, fb, color_plane); 2427 height = ALIGN(height, intel_tile_height(fb, color_plane)); 2428 2429 /* Catch potential overflows early */ 2430 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]), 2431 fb->offsets[color_plane])) { 2432 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n", 2433 fb->offsets[color_plane], fb->pitches[color_plane], 2434 color_plane); 2435 return -ERANGE; 2436 } 2437 2438 *x = 0; 2439 *y = 0; 2440 2441 intel_adjust_aligned_offset(x, y, 2442 fb, color_plane, DRM_MODE_ROTATE_0, 2443 fb->pitches[color_plane], 2444 fb->offsets[color_plane], 0); 2445 2446 return 0; 2447 } 2448 2449 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) 2450 { 2451 switch (fb_modifier) { 2452 case
I915_FORMAT_MOD_X_TILED: 2453 return I915_TILING_X; 2454 case I915_FORMAT_MOD_Y_TILED: 2455 case I915_FORMAT_MOD_Y_TILED_CCS: 2456 return I915_TILING_Y; 2457 default: 2458 return I915_TILING_NONE; 2459 } 2460 } 2461 2462 /* 2463 * From the Sky Lake PRM: 2464 * "The Color Control Surface (CCS) contains the compression status of 2465 * the cache-line pairs. The compression state of the cache-line pair 2466 * is specified by 2 bits in the CCS. Each CCS cache-line represents 2467 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled 2468 * cache-line-pairs. CCS is always Y tiled." 2469 * 2470 * Since cache line pairs refers to horizontally adjacent cache lines, 2471 * each cache line in the CCS corresponds to an area of 32x16 cache 2472 * lines on the main surface. Since each pixel is 4 bytes, this gives 2473 * us a ratio of one byte in the CCS for each 8x16 pixels in the 2474 * main surface. 2475 */ 2476 static const struct drm_format_info ccs_formats[] = { 2477 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 2478 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2479 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 2480 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2481 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 2482 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 2483 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 2484 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 2485 }; 2486 2487 static const struct drm_format_info * 2488 lookup_format_info(const struct drm_format_info formats[], 2489 int num_formats, u32 format) 2490 { 2491 int i; 2492 2493 for (i = 0; i < num_formats; i++) { 2494 if (formats[i].format == format) 2495 return &formats[i]; 2496 } 2497 2498 return NULL; 2499 } 2500 2501 static const struct drm_format_info * 2502 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 2503 { 2504 switch (cmd->modifier[0]) { 2505 case I915_FORMAT_MOD_Y_TILED_CCS: 2506 case I915_FORMAT_MOD_Yf_TILED_CCS: 2507 return lookup_format_info(ccs_formats, 2508 ARRAY_SIZE(ccs_formats), 2509 cmd->pixel_format); 2510 default: 2511 return NULL; 2512 } 2513 } 2514 2515 bool is_ccs_modifier(u64 modifier) 2516 { 2517 return modifier == I915_FORMAT_MOD_Y_TILED_CCS || 2518 modifier == I915_FORMAT_MOD_Yf_TILED_CCS; 2519 } 2520 2521 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, 2522 u32 pixel_format, u64 modifier) 2523 { 2524 struct intel_crtc *crtc; 2525 struct intel_plane *plane; 2526 2527 /* 2528 * We assume the primary plane for pipe A has 2529 * the highest stride limits of them all. 2530 */ 2531 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); 2532 if (!crtc) 2533 return 0; 2534 2535 plane = to_intel_plane(crtc->base.primary); 2536 2537 return plane->max_stride(plane, pixel_format, modifier, 2538 DRM_MODE_ROTATE_0); 2539 } 2540 2541 static 2542 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv, 2543 u32 pixel_format, u64 modifier) 2544 { 2545 /* 2546 * Arbitrary limit for gen4+ chosen to match the 2547 * render engine max stride. 
2548 * 2549 * The new CCS hash mode makes remapping impossible 2550 */ 2551 if (!is_ccs_modifier(modifier)) { 2552 if (INTEL_GEN(dev_priv) >= 7) 2553 return 256*1024; 2554 else if (INTEL_GEN(dev_priv) >= 4) 2555 return 128*1024; 2556 } 2557 2558 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); 2559 } 2560 2561 static u32 2562 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) 2563 { 2564 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2565 2566 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) { 2567 u32 max_stride = intel_plane_fb_max_stride(dev_priv, 2568 fb->format->format, 2569 fb->modifier); 2570 2571 /* 2572 * To make remapping with linear generally feasible 2573 * we need the stride to be page aligned. 2574 */ 2575 if (fb->pitches[color_plane] > max_stride) 2576 return intel_tile_size(dev_priv); 2577 else 2578 return 64; 2579 } else { 2580 return intel_tile_width_bytes(fb, color_plane); 2581 } 2582 } 2583 2584 bool intel_plane_can_remap(const struct intel_plane_state *plane_state) 2585 { 2586 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2587 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2588 const struct drm_framebuffer *fb = plane_state->base.fb; 2589 int i; 2590 2591 /* We don't want to deal with remapping with cursors */ 2592 if (plane->id == PLANE_CURSOR) 2593 return false; 2594 2595 /* 2596 * The display engine limits already match/exceed the 2597 * render engine limits, so not much point in remapping. 2598 * Would also need to deal with the fence POT alignment 2599 * and gen2 2KiB GTT tile size. 2600 */ 2601 if (INTEL_GEN(dev_priv) < 4) 2602 return false; 2603 2604 /* 2605 * The new CCS hash mode isn't compatible with remapping as 2606 * the virtual address of the pages affects the compressed data. 2607 */ 2608 if (is_ccs_modifier(fb->modifier)) 2609 return false; 2610 2611 /* Linear needs a page aligned stride for remapping */ 2612 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) { 2613 unsigned int alignment = intel_tile_size(dev_priv) - 1; 2614 2615 for (i = 0; i < fb->format->num_planes; i++) { 2616 if (fb->pitches[i] & alignment) 2617 return false; 2618 } 2619 } 2620 2621 return true; 2622 } 2623 2624 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) 2625 { 2626 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2627 const struct drm_framebuffer *fb = plane_state->base.fb; 2628 unsigned int rotation = plane_state->base.rotation; 2629 u32 stride, max_stride; 2630 2631 /* 2632 * No remapping for invisible planes since we don't have 2633 * an actual source viewport to remap. 2634 */ 2635 if (!plane_state->base.visible) 2636 return false; 2637 2638 if (!intel_plane_can_remap(plane_state)) 2639 return false; 2640 2641 /* 2642 * FIXME: aux plane limits on gen9+ are 2643 * unclear in Bspec, for now no checking. 
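* Here we only compare color plane 0's stride for the requested rotation against the plane's max stride.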
2644 */ 2645 stride = intel_fb_pitch(fb, 0, rotation); 2646 max_stride = plane->max_stride(plane, fb->format->format, 2647 fb->modifier, rotation); 2648 2649 return stride > max_stride; 2650 } 2651 2652 static int 2653 intel_fill_fb_info(struct drm_i915_private *dev_priv, 2654 struct drm_framebuffer *fb) 2655 { 2656 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2657 struct intel_rotation_info *rot_info = &intel_fb->rot_info; 2658 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2659 u32 gtt_offset_rotated = 0; 2660 unsigned int max_size = 0; 2661 int i, num_planes = fb->format->num_planes; 2662 unsigned int tile_size = intel_tile_size(dev_priv); 2663 2664 for (i = 0; i < num_planes; i++) { 2665 unsigned int width, height; 2666 unsigned int cpp, size; 2667 u32 offset; 2668 int x, y; 2669 int ret; 2670 2671 cpp = fb->format->cpp[i]; 2672 width = drm_framebuffer_plane_width(fb->width, fb, i); 2673 height = drm_framebuffer_plane_height(fb->height, fb, i); 2674 2675 ret = intel_fb_offset_to_xy(&x, &y, fb, i); 2676 if (ret) { 2677 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 2678 i, fb->offsets[i]); 2679 return ret; 2680 } 2681 2682 if (is_ccs_modifier(fb->modifier) && i == 1) { 2683 int hsub = fb->format->hsub; 2684 int vsub = fb->format->vsub; 2685 int tile_width, tile_height; 2686 int main_x, main_y; 2687 int ccs_x, ccs_y; 2688 2689 intel_tile_dims(fb, i, &tile_width, &tile_height); 2690 tile_width *= hsub; 2691 tile_height *= vsub; 2692 2693 ccs_x = (x * hsub) % tile_width; 2694 ccs_y = (y * vsub) % tile_height; 2695 main_x = intel_fb->normal[0].x % tile_width; 2696 main_y = intel_fb->normal[0].y % tile_height; 2697 2698 /* 2699 * CCS doesn't have its own x/y offset register, so the intra CCS tile 2700 * x/y offsets must match between CCS and the main surface. 2701 */ 2702 if (main_x != ccs_x || main_y != ccs_y) { 2703 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n", 2704 main_x, main_y, 2705 ccs_x, ccs_y, 2706 intel_fb->normal[0].x, 2707 intel_fb->normal[0].y, 2708 x, y); 2709 return -EINVAL; 2710 } 2711 } 2712 2713 /* 2714 * The fence (if used) is aligned to the start of the object 2715 * so having the framebuffer wrap around across the edge of the 2716 * fenced region doesn't really work. We have no API to configure 2717 * the fence start offset within the object (nor could we probably 2718 * on gen2/3). So it's just easier if we just require that the 2719 * fb layout agrees with the fence layout. We already check that the 2720 * fb stride matches the fence stride elsewhere. 2721 */ 2722 if (i == 0 && i915_gem_object_is_tiled(obj) && 2723 (x + width) * cpp > fb->pitches[i]) { 2724 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 2725 i, fb->offsets[i]); 2726 return -EINVAL; 2727 } 2728 2729 /* 2730 * First pixel of the framebuffer from 2731 * the start of the normal gtt mapping. 
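* (These normal view offsets are also the starting point for the remapped/rotated views computed in intel_plane_remap_gtt().)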
2732 */ 2733 intel_fb->normal[i].x = x; 2734 intel_fb->normal[i].y = y; 2735 2736 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i, 2737 fb->pitches[i], 2738 DRM_MODE_ROTATE_0, 2739 tile_size); 2740 offset /= tile_size; 2741 2742 if (!is_surface_linear(fb->modifier, i)) { 2743 unsigned int tile_width, tile_height; 2744 unsigned int pitch_tiles; 2745 struct drm_rect r; 2746 2747 intel_tile_dims(fb, i, &tile_width, &tile_height); 2748 2749 rot_info->plane[i].offset = offset; 2750 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp); 2751 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); 2752 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); 2753 2754 intel_fb->rotated[i].pitch = 2755 rot_info->plane[i].height * tile_height; 2756 2757 /* how many tiles does this plane need */ 2758 size = rot_info->plane[i].stride * rot_info->plane[i].height; 2759 /* 2760 * If the plane isn't horizontally tile aligned, 2761 * we need one more tile. 2762 */ 2763 if (x != 0) 2764 size++; 2765 2766 /* rotate the x/y offsets to match the GTT view */ 2767 drm_rect_init(&r, x, y, width, height); 2768 drm_rect_rotate(&r, 2769 rot_info->plane[i].width * tile_width, 2770 rot_info->plane[i].height * tile_height, 2771 DRM_MODE_ROTATE_270); 2772 x = r.x1; 2773 y = r.y1; 2774 2775 /* rotate the tile dimensions to match the GTT view */ 2776 pitch_tiles = intel_fb->rotated[i].pitch / tile_height; 2777 swap(tile_width, tile_height); 2778 2779 /* 2780 * We only keep the x/y offsets, so push all of the 2781 * gtt offset into the x/y offsets. 2782 */ 2783 intel_adjust_tile_offset(&x, &y, 2784 tile_width, tile_height, 2785 tile_size, pitch_tiles, 2786 gtt_offset_rotated * tile_size, 0); 2787 2788 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; 2789 2790 /* 2791 * First pixel of the framebuffer from 2792 * the start of the rotated gtt mapping. 2793 */ 2794 intel_fb->rotated[i].x = x; 2795 intel_fb->rotated[i].y = y; 2796 } else { 2797 size = DIV_ROUND_UP((y + height) * fb->pitches[i] + 2798 x * cpp, tile_size); 2799 } 2800 2801 /* how many tiles in total needed in the bo */ 2802 max_size = max(max_size, offset + size); 2803 } 2804 2805 if (mul_u32_u32(max_size, tile_size) > obj->base.size) { 2806 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n", 2807 mul_u32_u32(max_size, tile_size), obj->base.size); 2808 return -EINVAL; 2809 } 2810 2811 return 0; 2812 } 2813 2814 static void 2815 intel_plane_remap_gtt(struct intel_plane_state *plane_state) 2816 { 2817 struct drm_i915_private *dev_priv = 2818 to_i915(plane_state->base.plane->dev); 2819 struct drm_framebuffer *fb = plane_state->base.fb; 2820 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2821 struct intel_rotation_info *info = &plane_state->view.rotated; 2822 unsigned int rotation = plane_state->base.rotation; 2823 int i, num_planes = fb->format->num_planes; 2824 unsigned int tile_size = intel_tile_size(dev_priv); 2825 unsigned int src_x, src_y; 2826 unsigned int src_w, src_h; 2827 u32 gtt_offset = 0; 2828 2829 memset(&plane_state->view, 0, sizeof(plane_state->view)); 2830 plane_state->view.type = drm_rotation_90_or_270(rotation) ? 
2831 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED; 2832 2833 src_x = plane_state->base.src.x1 >> 16; 2834 src_y = plane_state->base.src.y1 >> 16; 2835 src_w = drm_rect_width(&plane_state->base.src) >> 16; 2836 src_h = drm_rect_height(&plane_state->base.src) >> 16; 2837 2838 WARN_ON(is_ccs_modifier(fb->modifier)); 2839 2840 /* Make src coordinates relative to the viewport */ 2841 drm_rect_translate(&plane_state->base.src, 2842 -(src_x << 16), -(src_y << 16)); 2843 2844 /* Rotate src coordinates to match rotated GTT view */ 2845 if (drm_rotation_90_or_270(rotation)) 2846 drm_rect_rotate(&plane_state->base.src, 2847 src_w << 16, src_h << 16, 2848 DRM_MODE_ROTATE_270); 2849 2850 for (i = 0; i < num_planes; i++) { 2851 unsigned int hsub = i ? fb->format->hsub : 1; 2852 unsigned int vsub = i ? fb->format->vsub : 1; 2853 unsigned int cpp = fb->format->cpp[i]; 2854 unsigned int tile_width, tile_height; 2855 unsigned int width, height; 2856 unsigned int pitch_tiles; 2857 unsigned int x, y; 2858 u32 offset; 2859 2860 intel_tile_dims(fb, i, &tile_width, &tile_height); 2861 2862 x = src_x / hsub; 2863 y = src_y / vsub; 2864 width = src_w / hsub; 2865 height = src_h / vsub; 2866 2867 /* 2868 * First pixel of the src viewport from the 2869 * start of the normal gtt mapping. 2870 */ 2871 x += intel_fb->normal[i].x; 2872 y += intel_fb->normal[i].y; 2873 2874 offset = intel_compute_aligned_offset(dev_priv, &x, &y, 2875 fb, i, fb->pitches[i], 2876 DRM_MODE_ROTATE_0, tile_size); 2877 offset /= tile_size; 2878 2879 info->plane[i].offset = offset; 2880 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], 2881 tile_width * cpp); 2882 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); 2883 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); 2884 2885 if (drm_rotation_90_or_270(rotation)) { 2886 struct drm_rect r; 2887 2888 /* rotate the x/y offsets to match the GTT view */ 2889 drm_rect_init(&r, x, y, width, height); 2890 drm_rect_rotate(&r, 2891 info->plane[i].width * tile_width, 2892 info->plane[i].height * tile_height, 2893 DRM_MODE_ROTATE_270); 2894 x = r.x1; 2895 y = r.y1; 2896 2897 pitch_tiles = info->plane[i].height; 2898 plane_state->color_plane[i].stride = pitch_tiles * tile_height; 2899 2900 /* rotate the tile dimensions to match the GTT view */ 2901 swap(tile_width, tile_height); 2902 } else { 2903 pitch_tiles = info->plane[i].width; 2904 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp; 2905 } 2906 2907 /* 2908 * We only keep the x/y offsets, so push all of the 2909 * gtt offset into the x/y offsets. 2910 */ 2911 intel_adjust_tile_offset(&x, &y, 2912 tile_width, tile_height, 2913 tile_size, pitch_tiles, 2914 gtt_offset * tile_size, 0); 2915 2916 gtt_offset += info->plane[i].width * info->plane[i].height; 2917 2918 plane_state->color_plane[i].offset = 0; 2919 plane_state->color_plane[i].x = x; 2920 plane_state->color_plane[i].y = y; 2921 } 2922 } 2923 2924 static int 2925 intel_plane_compute_gtt(struct intel_plane_state *plane_state) 2926 { 2927 const struct intel_framebuffer *fb = 2928 to_intel_framebuffer(plane_state->base.fb); 2929 unsigned int rotation = plane_state->base.rotation; 2930 int i, num_planes; 2931 2932 if (!fb) 2933 return 0; 2934 2935 num_planes = fb->base.format->num_planes; 2936 2937 if (intel_plane_needs_remap(plane_state)) { 2938 intel_plane_remap_gtt(plane_state); 2939 2940 /* 2941 * Sometimes even remapping can't overcome 2942 * the stride limitations :( Can happen with 2943 * big plane sizes and suitably misaligned 2944 * offsets. 
2945 */ 2946 return intel_plane_check_stride(plane_state); 2947 } 2948 2949 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation); 2950 2951 for (i = 0; i < num_planes; i++) { 2952 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation); 2953 plane_state->color_plane[i].offset = 0; 2954 2955 if (drm_rotation_90_or_270(rotation)) { 2956 plane_state->color_plane[i].x = fb->rotated[i].x; 2957 plane_state->color_plane[i].y = fb->rotated[i].y; 2958 } else { 2959 plane_state->color_plane[i].x = fb->normal[i].x; 2960 plane_state->color_plane[i].y = fb->normal[i].y; 2961 } 2962 } 2963 2964 /* Rotate src coordinates to match rotated GTT view */ 2965 if (drm_rotation_90_or_270(rotation)) 2966 drm_rect_rotate(&plane_state->base.src, 2967 fb->base.width << 16, fb->base.height << 16, 2968 DRM_MODE_ROTATE_270); 2969 2970 return intel_plane_check_stride(plane_state); 2971 } 2972 2973 static int i9xx_format_to_fourcc(int format) 2974 { 2975 switch (format) { 2976 case DISPPLANE_8BPP: 2977 return DRM_FORMAT_C8; 2978 case DISPPLANE_BGRX555: 2979 return DRM_FORMAT_XRGB1555; 2980 case DISPPLANE_BGRX565: 2981 return DRM_FORMAT_RGB565; 2982 default: 2983 case DISPPLANE_BGRX888: 2984 return DRM_FORMAT_XRGB8888; 2985 case DISPPLANE_RGBX888: 2986 return DRM_FORMAT_XBGR8888; 2987 case DISPPLANE_BGRX101010: 2988 return DRM_FORMAT_XRGB2101010; 2989 case DISPPLANE_RGBX101010: 2990 return DRM_FORMAT_XBGR2101010; 2991 case DISPPLANE_RGBX161616: 2992 return DRM_FORMAT_XBGR16161616F; 2993 } 2994 } 2995 2996 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) 2997 { 2998 switch (format) { 2999 case PLANE_CTL_FORMAT_RGB_565: 3000 return DRM_FORMAT_RGB565; 3001 case PLANE_CTL_FORMAT_NV12: 3002 return DRM_FORMAT_NV12; 3003 case PLANE_CTL_FORMAT_P010: 3004 return DRM_FORMAT_P010; 3005 case PLANE_CTL_FORMAT_P012: 3006 return DRM_FORMAT_P012; 3007 case PLANE_CTL_FORMAT_P016: 3008 return DRM_FORMAT_P016; 3009 case PLANE_CTL_FORMAT_Y210: 3010 return DRM_FORMAT_Y210; 3011 case PLANE_CTL_FORMAT_Y212: 3012 return DRM_FORMAT_Y212; 3013 case PLANE_CTL_FORMAT_Y216: 3014 return DRM_FORMAT_Y216; 3015 case PLANE_CTL_FORMAT_Y410: 3016 return DRM_FORMAT_XVYU2101010; 3017 case PLANE_CTL_FORMAT_Y412: 3018 return DRM_FORMAT_XVYU12_16161616; 3019 case PLANE_CTL_FORMAT_Y416: 3020 return DRM_FORMAT_XVYU16161616; 3021 default: 3022 case PLANE_CTL_FORMAT_XRGB_8888: 3023 if (rgb_order) { 3024 if (alpha) 3025 return DRM_FORMAT_ABGR8888; 3026 else 3027 return DRM_FORMAT_XBGR8888; 3028 } else { 3029 if (alpha) 3030 return DRM_FORMAT_ARGB8888; 3031 else 3032 return DRM_FORMAT_XRGB8888; 3033 } 3034 case PLANE_CTL_FORMAT_XRGB_2101010: 3035 if (rgb_order) 3036 return DRM_FORMAT_XBGR2101010; 3037 else 3038 return DRM_FORMAT_XRGB2101010; 3039 case PLANE_CTL_FORMAT_XRGB_16161616F: 3040 if (rgb_order) { 3041 if (alpha) 3042 return DRM_FORMAT_ABGR16161616F; 3043 else 3044 return DRM_FORMAT_XBGR16161616F; 3045 } else { 3046 if (alpha) 3047 return DRM_FORMAT_ARGB16161616F; 3048 else 3049 return DRM_FORMAT_XRGB16161616F; 3050 } 3051 } 3052 } 3053 3054 static bool 3055 intel_alloc_initial_plane_obj(struct intel_crtc *crtc, 3056 struct intel_initial_plane_config *plane_config) 3057 { 3058 struct drm_device *dev = crtc->base.dev; 3059 struct drm_i915_private *dev_priv = to_i915(dev); 3060 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 3061 struct drm_framebuffer *fb = &plane_config->fb->base; 3062 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); 3063 u32 size_aligned = round_up(plane_config->base + 
plane_config->size, 3064 PAGE_SIZE); 3065 struct drm_i915_gem_object *obj; 3066 bool ret = false; 3067 3068 size_aligned -= base_aligned; 3069 3070 if (plane_config->size == 0) 3071 return false; 3072 3073 /* If the FB is too big, just don't use it since fbdev is not very 3074 * important and we should probably use that space with FBC or other 3075 * features. */ 3076 if (size_aligned * 2 > dev_priv->stolen_usable_size) 3077 return false; 3078 3079 switch (fb->modifier) { 3080 case DRM_FORMAT_MOD_LINEAR: 3081 case I915_FORMAT_MOD_X_TILED: 3082 case I915_FORMAT_MOD_Y_TILED: 3083 break; 3084 default: 3085 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n", 3086 fb->modifier); 3087 return false; 3088 } 3089 3090 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv, 3091 base_aligned, 3092 base_aligned, 3093 size_aligned); 3094 if (IS_ERR(obj)) 3095 return false; 3096 3097 switch (plane_config->tiling) { 3098 case I915_TILING_NONE: 3099 break; 3100 case I915_TILING_X: 3101 case I915_TILING_Y: 3102 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling; 3103 break; 3104 default: 3105 MISSING_CASE(plane_config->tiling); 3106 goto out; 3107 } 3108 3109 mode_cmd.pixel_format = fb->format->format; 3110 mode_cmd.width = fb->width; 3111 mode_cmd.height = fb->height; 3112 mode_cmd.pitches[0] = fb->pitches[0]; 3113 mode_cmd.modifier[0] = fb->modifier; 3114 mode_cmd.flags = DRM_MODE_FB_MODIFIERS; 3115 3116 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) { 3117 DRM_DEBUG_KMS("intel fb init failed\n"); 3118 goto out; 3119 } 3120 3121 3122 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); 3123 ret = true; 3124 out: 3125 i915_gem_object_put(obj); 3126 return ret; 3127 } 3128 3129 static void 3130 intel_set_plane_visible(struct intel_crtc_state *crtc_state, 3131 struct intel_plane_state *plane_state, 3132 bool visible) 3133 { 3134 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 3135 3136 plane_state->base.visible = visible; 3137 3138 if (visible) 3139 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base); 3140 else 3141 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base); 3142 } 3143 3144 static void fixup_active_planes(struct intel_crtc_state *crtc_state) 3145 { 3146 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 3147 struct drm_plane *plane; 3148 3149 /* 3150 * Active_planes aliases if multiple "primary" or cursor planes 3151 * have been used on the same (or wrong) pipe. plane_mask uses 3152 * unique ids, hence we can use that to reconstruct active_planes. 
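* E.g. a plane_mask holding this pipe's primary and cursor planes is decoded back into active_planes = BIT(PLANE_PRIMARY) | BIT(PLANE_CURSOR).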
3153 */ 3154 crtc_state->active_planes = 0; 3155 3156 drm_for_each_plane_mask(plane, &dev_priv->drm, 3157 crtc_state->base.plane_mask) 3158 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); 3159 } 3160 3161 static void intel_plane_disable_noatomic(struct intel_crtc *crtc, 3162 struct intel_plane *plane) 3163 { 3164 struct intel_crtc_state *crtc_state = 3165 to_intel_crtc_state(crtc->base.state); 3166 struct intel_plane_state *plane_state = 3167 to_intel_plane_state(plane->base.state); 3168 3169 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n", 3170 plane->base.base.id, plane->base.name, 3171 crtc->base.base.id, crtc->base.name); 3172 3173 intel_set_plane_visible(crtc_state, plane_state, false); 3174 fixup_active_planes(crtc_state); 3175 crtc_state->data_rate[plane->id] = 0; 3176 crtc_state->min_cdclk[plane->id] = 0; 3177 3178 if (plane->id == PLANE_PRIMARY) 3179 intel_pre_disable_primary_noatomic(&crtc->base); 3180 3181 intel_disable_plane(plane, crtc_state); 3182 } 3183 3184 static struct intel_frontbuffer * 3185 to_intel_frontbuffer(struct drm_framebuffer *fb) 3186 { 3187 return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL; 3188 } 3189 3190 static void 3191 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, 3192 struct intel_initial_plane_config *plane_config) 3193 { 3194 struct drm_device *dev = intel_crtc->base.dev; 3195 struct drm_i915_private *dev_priv = to_i915(dev); 3196 struct drm_crtc *c; 3197 struct drm_plane *primary = intel_crtc->base.primary; 3198 struct drm_plane_state *plane_state = primary->state; 3199 struct intel_plane *intel_plane = to_intel_plane(primary); 3200 struct intel_plane_state *intel_state = 3201 to_intel_plane_state(plane_state); 3202 struct drm_framebuffer *fb; 3203 3204 if (!plane_config->fb) 3205 return; 3206 3207 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { 3208 fb = &plane_config->fb->base; 3209 goto valid_fb; 3210 } 3211 3212 kfree(plane_config->fb); 3213 3214 /* 3215 * Failed to alloc the obj, check to see if we should share 3216 * an fb with another CRTC instead 3217 */ 3218 for_each_crtc(dev, c) { 3219 struct intel_plane_state *state; 3220 3221 if (c == &intel_crtc->base) 3222 continue; 3223 3224 if (!to_intel_crtc(c)->active) 3225 continue; 3226 3227 state = to_intel_plane_state(c->primary->state); 3228 if (!state->vma) 3229 continue; 3230 3231 if (intel_plane_ggtt_offset(state) == plane_config->base) { 3232 fb = state->base.fb; 3233 drm_framebuffer_get(fb); 3234 goto valid_fb; 3235 } 3236 } 3237 3238 /* 3239 * We've failed to reconstruct the BIOS FB. Current display state 3240 * indicates that the primary plane is visible, but has a NULL FB, 3241 * which will lead to problems later if we don't fix it up. The 3242 * simplest solution is to just disable the primary plane now and 3243 * pretend the BIOS never had it enabled. 
3244 */ 3245 intel_plane_disable_noatomic(intel_crtc, intel_plane); 3246 3247 return; 3248 3249 valid_fb: 3250 intel_state->base.rotation = plane_config->rotation; 3251 intel_fill_fb_ggtt_view(&intel_state->view, fb, 3252 intel_state->base.rotation); 3253 intel_state->color_plane[0].stride = 3254 intel_fb_pitch(fb, 0, intel_state->base.rotation); 3255 3256 intel_state->vma = 3257 intel_pin_and_fence_fb_obj(fb, 3258 &intel_state->view, 3259 intel_plane_uses_fence(intel_state), 3260 &intel_state->flags); 3261 if (IS_ERR(intel_state->vma)) { 3262 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", 3263 intel_crtc->pipe, PTR_ERR(intel_state->vma)); 3264 3265 intel_state->vma = NULL; 3266 drm_framebuffer_put(fb); 3267 return; 3268 } 3269 3270 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); 3271 3272 plane_state->src_x = 0; 3273 plane_state->src_y = 0; 3274 plane_state->src_w = fb->width << 16; 3275 plane_state->src_h = fb->height << 16; 3276 3277 plane_state->crtc_x = 0; 3278 plane_state->crtc_y = 0; 3279 plane_state->crtc_w = fb->width; 3280 plane_state->crtc_h = fb->height; 3281 3282 intel_state->base.src = drm_plane_state_src(plane_state); 3283 intel_state->base.dst = drm_plane_state_dest(plane_state); 3284 3285 if (plane_config->tiling) 3286 dev_priv->preserve_bios_swizzle = true; 3287 3288 plane_state->fb = fb; 3289 plane_state->crtc = &intel_crtc->base; 3290 3291 atomic_or(to_intel_plane(primary)->frontbuffer_bit, 3292 &to_intel_frontbuffer(fb)->bits); 3293 } 3294 3295 static int skl_max_plane_width(const struct drm_framebuffer *fb, 3296 int color_plane, 3297 unsigned int rotation) 3298 { 3299 int cpp = fb->format->cpp[color_plane]; 3300 3301 switch (fb->modifier) { 3302 case DRM_FORMAT_MOD_LINEAR: 3303 case I915_FORMAT_MOD_X_TILED: 3304 /* 3305 * Validated limit is 4k, but 5k should 3306 * work apart from the following features: 3307 * - Ytile (already limited to 4k) 3308 * - FP16 (already limited to 4k) 3309 * - render compression (already limited to 4k) 3310 * - KVMR sprite and cursor (don't care) 3311 * - horizontal panning (TODO verify this) 3312 * - pipe and plane scaling (TODO verify this) 3313 */ 3314 if (cpp == 8) 3315 return 4096; 3316 else 3317 return 5120; 3318 case I915_FORMAT_MOD_Y_TILED_CCS: 3319 case I915_FORMAT_MOD_Yf_TILED_CCS: 3320 /* FIXME AUX plane? */ 3321 case I915_FORMAT_MOD_Y_TILED: 3322 case I915_FORMAT_MOD_Yf_TILED: 3323 if (cpp == 8) 3324 return 2048; 3325 else 3326 return 4096; 3327 default: 3328 MISSING_CASE(fb->modifier); 3329 return 2048; 3330 } 3331 } 3332 3333 static int glk_max_plane_width(const struct drm_framebuffer *fb, 3334 int color_plane, 3335 unsigned int rotation) 3336 { 3337 int cpp = fb->format->cpp[color_plane]; 3338 3339 switch (fb->modifier) { 3340 case DRM_FORMAT_MOD_LINEAR: 3341 case I915_FORMAT_MOD_X_TILED: 3342 if (cpp == 8) 3343 return 4096; 3344 else 3345 return 5120; 3346 case I915_FORMAT_MOD_Y_TILED_CCS: 3347 case I915_FORMAT_MOD_Yf_TILED_CCS: 3348 /* FIXME AUX plane?
*/ 3349 case I915_FORMAT_MOD_Y_TILED: 3350 case I915_FORMAT_MOD_Yf_TILED: 3351 if (cpp == 8) 3352 return 2048; 3353 else 3354 return 5120; 3355 default: 3356 MISSING_CASE(fb->modifier); 3357 return 2048; 3358 } 3359 } 3360 3361 static int icl_max_plane_width(const struct drm_framebuffer *fb, 3362 int color_plane, 3363 unsigned int rotation) 3364 { 3365 return 5120; 3366 } 3367 3368 static int skl_max_plane_height(void) 3369 { 3370 return 4096; 3371 } 3372 3373 static int icl_max_plane_height(void) 3374 { 3375 return 4320; 3376 } 3377 3378 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, 3379 int main_x, int main_y, u32 main_offset) 3380 { 3381 const struct drm_framebuffer *fb = plane_state->base.fb; 3382 int hsub = fb->format->hsub; 3383 int vsub = fb->format->vsub; 3384 int aux_x = plane_state->color_plane[1].x; 3385 int aux_y = plane_state->color_plane[1].y; 3386 u32 aux_offset = plane_state->color_plane[1].offset; 3387 u32 alignment = intel_surf_alignment(fb, 1); 3388 3389 while (aux_offset >= main_offset && aux_y <= main_y) { 3390 int x, y; 3391 3392 if (aux_x == main_x && aux_y == main_y) 3393 break; 3394 3395 if (aux_offset == 0) 3396 break; 3397 3398 x = aux_x / hsub; 3399 y = aux_y / vsub; 3400 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1, 3401 aux_offset, aux_offset - alignment); 3402 aux_x = x * hsub + aux_x % hsub; 3403 aux_y = y * vsub + aux_y % vsub; 3404 } 3405 3406 if (aux_x != main_x || aux_y != main_y) 3407 return false; 3408 3409 plane_state->color_plane[1].offset = aux_offset; 3410 plane_state->color_plane[1].x = aux_x; 3411 plane_state->color_plane[1].y = aux_y; 3412 3413 return true; 3414 } 3415 3416 static int skl_check_main_surface(struct intel_plane_state *plane_state) 3417 { 3418 struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); 3419 const struct drm_framebuffer *fb = plane_state->base.fb; 3420 unsigned int rotation = plane_state->base.rotation; 3421 int x = plane_state->base.src.x1 >> 16; 3422 int y = plane_state->base.src.y1 >> 16; 3423 int w = drm_rect_width(&plane_state->base.src) >> 16; 3424 int h = drm_rect_height(&plane_state->base.src) >> 16; 3425 int max_width; 3426 int max_height; 3427 u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset; 3428 3429 if (INTEL_GEN(dev_priv) >= 11) 3430 max_width = icl_max_plane_width(fb, 0, rotation); 3431 else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 3432 max_width = glk_max_plane_width(fb, 0, rotation); 3433 else 3434 max_width = skl_max_plane_width(fb, 0, rotation); 3435 3436 if (INTEL_GEN(dev_priv) >= 11) 3437 max_height = icl_max_plane_height(); 3438 else 3439 max_height = skl_max_plane_height(); 3440 3441 if (w > max_width || h > max_height) { 3442 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", 3443 w, h, max_width, max_height); 3444 return -EINVAL; 3445 } 3446 3447 intel_add_fb_offsets(&x, &y, plane_state, 0); 3448 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0); 3449 alignment = intel_surf_alignment(fb, 0); 3450 3451 /* 3452 * AUX surface offset is specified as the distance from the 3453 * main surface offset, and it must be non-negative. Make 3454 * sure that is what we will get. 3455 */ 3456 if (offset > aux_offset) 3457 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3458 offset, aux_offset & ~(alignment - 1)); 3459 3460 /* 3461 * When using an X-tiled surface, the plane blows up 3462 * if the x offset + width exceed the stride. 
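* E.g. (made-up numbers) a 16384 byte stride with cpp = 4 can fit an x + w of at most 4096 pixels per row; if we're over, the loop below steps the surface offset back one alignment unit at a time, folding the difference into the x/y offsets, until the row fits (giving up once offset reaches 0).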
3463 * 3464 * TODO: linear and Y-tiled seem fine, Yf untested, 3465 */ 3466 if (fb->modifier == I915_FORMAT_MOD_X_TILED) { 3467 int cpp = fb->format->cpp[0]; 3468 3469 while ((x + w) * cpp > plane_state->color_plane[0].stride) { 3470 if (offset == 0) { 3471 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n"); 3472 return -EINVAL; 3473 } 3474 3475 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3476 offset, offset - alignment); 3477 } 3478 } 3479 3480 /* 3481 * CCS AUX surface doesn't have its own x/y offsets, we must make sure 3482 * they match with the main surface x/y offsets. 3483 */ 3484 if (is_ccs_modifier(fb->modifier)) { 3485 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) { 3486 if (offset == 0) 3487 break; 3488 3489 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3490 offset, offset - alignment); 3491 } 3492 3493 if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) { 3494 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n"); 3495 return -EINVAL; 3496 } 3497 } 3498 3499 plane_state->color_plane[0].offset = offset; 3500 plane_state->color_plane[0].x = x; 3501 plane_state->color_plane[0].y = y; 3502 3503 /* 3504 * Put the final coordinates back so that the src 3505 * coordinate checks will see the right values. 3506 */ 3507 drm_rect_translate_to(&plane_state->base.src, 3508 x << 16, y << 16); 3509 3510 return 0; 3511 } 3512 3513 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) 3514 { 3515 const struct drm_framebuffer *fb = plane_state->base.fb; 3516 unsigned int rotation = plane_state->base.rotation; 3517 int max_width = skl_max_plane_width(fb, 1, rotation); 3518 int max_height = 4096; 3519 int x = plane_state->base.src.x1 >> 17; 3520 int y = plane_state->base.src.y1 >> 17; 3521 int w = drm_rect_width(&plane_state->base.src) >> 17; 3522 int h = drm_rect_height(&plane_state->base.src) >> 17; 3523 u32 offset; 3524 3525 intel_add_fb_offsets(&x, &y, plane_state, 1); 3526 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1); 3527 3528 /* FIXME not quite sure how/if these apply to the chroma plane */ 3529 if (w > max_width || h > max_height) { 3530 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n", 3531 w, h, max_width, max_height); 3532 return -EINVAL; 3533 } 3534 3535 plane_state->color_plane[1].offset = offset; 3536 plane_state->color_plane[1].x = x; 3537 plane_state->color_plane[1].y = y; 3538 3539 return 0; 3540 } 3541 3542 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) 3543 { 3544 const struct drm_framebuffer *fb = plane_state->base.fb; 3545 int src_x = plane_state->base.src.x1 >> 16; 3546 int src_y = plane_state->base.src.y1 >> 16; 3547 int hsub = fb->format->hsub; 3548 int vsub = fb->format->vsub; 3549 int x = src_x / hsub; 3550 int y = src_y / vsub; 3551 u32 offset; 3552 3553 intel_add_fb_offsets(&x, &y, plane_state, 1); 3554 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1); 3555 3556 plane_state->color_plane[1].offset = offset; 3557 plane_state->color_plane[1].x = x * hsub + src_x % hsub; 3558 plane_state->color_plane[1].y = y * vsub + src_y % vsub; 3559 3560 return 0; 3561 } 3562 3563 int skl_check_plane_surface(struct intel_plane_state *plane_state) 3564 { 3565 const struct drm_framebuffer *fb = plane_state->base.fb; 3566 int ret; 3567 3568 ret = intel_plane_compute_gtt(plane_state); 3569 if (ret) 3570 return ret; 3571 3572 if 
(!plane_state->base.visible) 3573 return 0; 3574 3575 /* 3576 * Handle the AUX surface first since 3577 * the main surface setup depends on it. 3578 */ 3579 if (drm_format_info_is_yuv_semiplanar(fb->format)) { 3580 ret = skl_check_nv12_aux_surface(plane_state); 3581 if (ret) 3582 return ret; 3583 } else if (is_ccs_modifier(fb->modifier)) { 3584 ret = skl_check_ccs_aux_surface(plane_state); 3585 if (ret) 3586 return ret; 3587 } else { 3588 plane_state->color_plane[1].offset = ~0xfff; 3589 plane_state->color_plane[1].x = 0; 3590 plane_state->color_plane[1].y = 0; 3591 } 3592 3593 ret = skl_check_main_surface(plane_state); 3594 if (ret) 3595 return ret; 3596 3597 return 0; 3598 } 3599 3600 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state, 3601 const struct intel_plane_state *plane_state, 3602 unsigned int *num, unsigned int *den) 3603 { 3604 const struct drm_framebuffer *fb = plane_state->base.fb; 3605 unsigned int cpp = fb->format->cpp[0]; 3606 3607 /* 3608 * g4x bspec says 64bpp pixel rate can't exceed 80% 3609 * of cdclk when the sprite plane is enabled on the 3610 * same pipe. ilk/snb bspec says 64bpp pixel rate is 3611 * never allowed to exceed 80% of cdclk. Let's just go 3612 * with the ilk/snb limit always. 3613 */ 3614 if (cpp == 8) { 3615 *num = 10; 3616 *den = 8; 3617 } else { 3618 *num = 1; 3619 *den = 1; 3620 } 3621 } 3622 3623 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, 3624 const struct intel_plane_state *plane_state) 3625 { 3626 unsigned int pixel_rate; 3627 unsigned int num, den; 3628 3629 /* 3630 * Note that crtc_state->pixel_rate accounts for both 3631 * horizontal and vertical panel fitter downscaling factors. 3632 * Pre-HSW bspec tells us to only consider the horizontal 3633 * downscaling factor here. We ignore that and just consider 3634 * both for simplicity. 
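* * E.g. cpp == 8 gives num/den = 10/8 below, i.e. the plane needs cdclk >= 1.25 * pixel_rate, which is the same as capping the 64bpp pixel rate at 80% of cdclk; a double wide pipe pumps two pixels per clock and halves the requirement.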
3635 */ 3636 pixel_rate = crtc_state->pixel_rate; 3637 3638 i9xx_plane_ratio(crtc_state, plane_state, &num, &den); 3639 3640 /* two pixels per clock with double wide pipe */ 3641 if (crtc_state->double_wide) 3642 den *= 2; 3643 3644 return DIV_ROUND_UP(pixel_rate * num, den); 3645 } 3646 3647 unsigned int 3648 i9xx_plane_max_stride(struct intel_plane *plane, 3649 u32 pixel_format, u64 modifier, 3650 unsigned int rotation) 3651 { 3652 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3653 3654 if (!HAS_GMCH(dev_priv)) { 3655 return 32*1024; 3656 } else if (INTEL_GEN(dev_priv) >= 4) { 3657 if (modifier == I915_FORMAT_MOD_X_TILED) 3658 return 16*1024; 3659 else 3660 return 32*1024; 3661 } else if (INTEL_GEN(dev_priv) >= 3) { 3662 if (modifier == I915_FORMAT_MOD_X_TILED) 3663 return 8*1024; 3664 else 3665 return 16*1024; 3666 } else { 3667 if (plane->i9xx_plane == PLANE_C) 3668 return 4*1024; 3669 else 3670 return 8*1024; 3671 } 3672 } 3673 3674 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 3675 { 3676 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 3677 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3678 u32 dspcntr = 0; 3679 3680 if (crtc_state->gamma_enable) 3681 dspcntr |= DISPPLANE_GAMMA_ENABLE; 3682 3683 if (crtc_state->csc_enable) 3684 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 3685 3686 if (INTEL_GEN(dev_priv) < 5) 3687 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); 3688 3689 return dspcntr; 3690 } 3691 3692 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, 3693 const struct intel_plane_state *plane_state) 3694 { 3695 struct drm_i915_private *dev_priv = 3696 to_i915(plane_state->base.plane->dev); 3697 const struct drm_framebuffer *fb = plane_state->base.fb; 3698 unsigned int rotation = plane_state->base.rotation; 3699 u32 dspcntr; 3700 3701 dspcntr = DISPLAY_PLANE_ENABLE; 3702 3703 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) || 3704 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 3705 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 3706 3707 switch (fb->format->format) { 3708 case DRM_FORMAT_C8: 3709 dspcntr |= DISPPLANE_8BPP; 3710 break; 3711 case DRM_FORMAT_XRGB1555: 3712 dspcntr |= DISPPLANE_BGRX555; 3713 break; 3714 case DRM_FORMAT_RGB565: 3715 dspcntr |= DISPPLANE_BGRX565; 3716 break; 3717 case DRM_FORMAT_XRGB8888: 3718 dspcntr |= DISPPLANE_BGRX888; 3719 break; 3720 case DRM_FORMAT_XBGR8888: 3721 dspcntr |= DISPPLANE_RGBX888; 3722 break; 3723 case DRM_FORMAT_XRGB2101010: 3724 dspcntr |= DISPPLANE_BGRX101010; 3725 break; 3726 case DRM_FORMAT_XBGR2101010: 3727 dspcntr |= DISPPLANE_RGBX101010; 3728 break; 3729 case DRM_FORMAT_XBGR16161616F: 3730 dspcntr |= DISPPLANE_RGBX161616; 3731 break; 3732 default: 3733 MISSING_CASE(fb->format->format); 3734 return 0; 3735 } 3736 3737 if (INTEL_GEN(dev_priv) >= 4 && 3738 fb->modifier == I915_FORMAT_MOD_X_TILED) 3739 dspcntr |= DISPPLANE_TILED; 3740 3741 if (rotation & DRM_MODE_ROTATE_180) 3742 dspcntr |= DISPPLANE_ROTATE_180; 3743 3744 if (rotation & DRM_MODE_REFLECT_X) 3745 dspcntr |= DISPPLANE_MIRROR; 3746 3747 return dspcntr; 3748 } 3749 3750 int i9xx_check_plane_surface(struct intel_plane_state *plane_state) 3751 { 3752 struct drm_i915_private *dev_priv = 3753 to_i915(plane_state->base.plane->dev); 3754 const struct drm_framebuffer *fb = plane_state->base.fb; 3755 int src_x, src_y, src_w; 3756 u32 offset; 3757 int ret; 3758 3759 ret = intel_plane_compute_gtt(plane_state); 3760 if (ret) 3761 return ret; 3762 3763 if (!plane_state->base.visible) 3764 return 0; 3765 3766 
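/* The plane src rectangle is in 16.16 fixed point; convert to whole pixels. */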
src_w = drm_rect_width(&plane_state->base.src) >> 16; 3767 src_x = plane_state->base.src.x1 >> 16; 3768 src_y = plane_state->base.src.y1 >> 16; 3769 3770 /* Undocumented hardware limit on i965/g4x/vlv/chv */ 3771 if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048) 3772 return -EINVAL; 3773 3774 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 3775 3776 if (INTEL_GEN(dev_priv) >= 4) 3777 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 3778 plane_state, 0); 3779 else 3780 offset = 0; 3781 3782 /* 3783 * Put the final coordinates back so that the src 3784 * coordinate checks will see the right values. 3785 */ 3786 drm_rect_translate_to(&plane_state->base.src, 3787 src_x << 16, src_y << 16); 3788 3789 /* HSW/BDW do this automagically in hardware */ 3790 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 3791 unsigned int rotation = plane_state->base.rotation; 3792 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 3793 int src_h = drm_rect_height(&plane_state->base.src) >> 16; 3794 3795 if (rotation & DRM_MODE_ROTATE_180) { 3796 src_x += src_w - 1; 3797 src_y += src_h - 1; 3798 } else if (rotation & DRM_MODE_REFLECT_X) { 3799 src_x += src_w - 1; 3800 } 3801 } 3802 3803 plane_state->color_plane[0].offset = offset; 3804 plane_state->color_plane[0].x = src_x; 3805 plane_state->color_plane[0].y = src_y; 3806 3807 return 0; 3808 } 3809 3810 static bool i9xx_plane_has_windowing(struct intel_plane *plane) 3811 { 3812 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3813 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3814 3815 if (IS_CHERRYVIEW(dev_priv)) 3816 return i9xx_plane == PLANE_B; 3817 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 3818 return false; 3819 else if (IS_GEN(dev_priv, 4)) 3820 return i9xx_plane == PLANE_C; 3821 else 3822 return i9xx_plane == PLANE_B || 3823 i9xx_plane == PLANE_C; 3824 } 3825 3826 static int 3827 i9xx_plane_check(struct intel_crtc_state *crtc_state, 3828 struct intel_plane_state *plane_state) 3829 { 3830 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 3831 int ret; 3832 3833 ret = chv_plane_check_rotation(plane_state); 3834 if (ret) 3835 return ret; 3836 3837 ret = drm_atomic_helper_check_plane_state(&plane_state->base, 3838 &crtc_state->base, 3839 DRM_PLANE_HELPER_NO_SCALING, 3840 DRM_PLANE_HELPER_NO_SCALING, 3841 i9xx_plane_has_windowing(plane), 3842 true); 3843 if (ret) 3844 return ret; 3845 3846 ret = i9xx_check_plane_surface(plane_state); 3847 if (ret) 3848 return ret; 3849 3850 if (!plane_state->base.visible) 3851 return 0; 3852 3853 ret = intel_plane_check_src_coordinates(plane_state); 3854 if (ret) 3855 return ret; 3856 3857 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state); 3858 3859 return 0; 3860 } 3861 3862 static void i9xx_update_plane(struct intel_plane *plane, 3863 const struct intel_crtc_state *crtc_state, 3864 const struct intel_plane_state *plane_state) 3865 { 3866 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3867 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3868 u32 linear_offset; 3869 int x = plane_state->color_plane[0].x; 3870 int y = plane_state->color_plane[0].y; 3871 int crtc_x = plane_state->base.dst.x1; 3872 int crtc_y = plane_state->base.dst.y1; 3873 int crtc_w = drm_rect_width(&plane_state->base.dst); 3874 int crtc_h = drm_rect_height(&plane_state->base.dst); 3875 unsigned long irqflags; 3876 u32 dspaddr_offset; 3877 u32 dspcntr; 3878 3879 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); 3880 3881 
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 3882 3883 if (INTEL_GEN(dev_priv) >= 4) 3884 dspaddr_offset = plane_state->color_plane[0].offset; 3885 else 3886 dspaddr_offset = linear_offset; 3887 3888 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3889 3890 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride); 3891 3892 if (INTEL_GEN(dev_priv) < 4) { 3893 /* 3894 * PLANE_A doesn't actually have a full window 3895 * generator but let's assume we still need to 3896 * program whatever is there. 3897 */ 3898 I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x); 3899 I915_WRITE_FW(DSPSIZE(i9xx_plane), 3900 ((crtc_h - 1) << 16) | (crtc_w - 1)); 3901 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { 3902 I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x); 3903 I915_WRITE_FW(PRIMSIZE(i9xx_plane), 3904 ((crtc_h - 1) << 16) | (crtc_w - 1)); 3905 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0); 3906 } 3907 3908 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3909 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x); 3910 } else if (INTEL_GEN(dev_priv) >= 4) { 3911 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset); 3912 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x); 3913 } 3914 3915 /* 3916 * The control register self-arms if the plane was previously 3917 * disabled. Try to make the plane enable atomic by writing 3918 * the control register just before the surface register. 3919 */ 3920 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); 3921 if (INTEL_GEN(dev_priv) >= 4) 3922 I915_WRITE_FW(DSPSURF(i9xx_plane), 3923 intel_plane_ggtt_offset(plane_state) + 3924 dspaddr_offset); 3925 else 3926 I915_WRITE_FW(DSPADDR(i9xx_plane), 3927 intel_plane_ggtt_offset(plane_state) + 3928 dspaddr_offset); 3929 3930 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3931 } 3932 3933 static void i9xx_disable_plane(struct intel_plane *plane, 3934 const struct intel_crtc_state *crtc_state) 3935 { 3936 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3937 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3938 unsigned long irqflags; 3939 u32 dspcntr; 3940 3941 /* 3942 * DSPCNTR pipe gamma enable on g4x+ and pipe csc 3943 * enable on ilk+ affect the pipe bottom color as 3944 * well, so we must configure them even if the plane 3945 * is disabled. 3946 * 3947 * On pre-g4x there is no way to gamma correct the 3948 * pipe bottom color but we'll keep on doing this 3949 * anyway so that the crtc state readout works correctly. 3950 */ 3951 dspcntr = i9xx_plane_ctl_crtc(crtc_state); 3952 3953 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3954 3955 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); 3956 if (INTEL_GEN(dev_priv) >= 4) 3957 I915_WRITE_FW(DSPSURF(i9xx_plane), 0); 3958 else 3959 I915_WRITE_FW(DSPADDR(i9xx_plane), 0); 3960 3961 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3962 } 3963 3964 static bool i9xx_plane_get_hw_state(struct intel_plane *plane, 3965 enum pipe *pipe) 3966 { 3967 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3968 enum intel_display_power_domain power_domain; 3969 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3970 intel_wakeref_t wakeref; 3971 bool ret; 3972 u32 val; 3973 3974 /* 3975 * Not 100% correct for planes that can move between pipes, 3976 * but that's only the case for gen2-4 which don't have any 3977 * display power wells. 
3978 */ 3979 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 3980 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 3981 if (!wakeref) 3982 return false; 3983
3984 val = I915_READ(DSPCNTR(i9xx_plane)); 3985 3986 ret = val & DISPLAY_PLANE_ENABLE; 3987
3988 if (INTEL_GEN(dev_priv) >= 5) 3989 *pipe = plane->pipe; 3990 else 3991 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 3992 DISPPLANE_SEL_PIPE_SHIFT; 3993
3994 intel_display_power_put(dev_priv, power_domain, wakeref); 3995 3996 return ret; 3997 } 3998
3999 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 4000 { 4001 struct drm_device *dev = intel_crtc->base.dev; 4002 struct drm_i915_private *dev_priv = to_i915(dev); 4003
4004 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); 4005 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 4006 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); 4007 } 4008
4009 /* 4010 * This function detaches (aka. unbinds) unused scalers in hardware 4011 */ 4012 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state) 4013 { 4014 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 4015 const struct intel_crtc_scaler_state *scaler_state = 4016 &crtc_state->scaler_state; 4017 int i; 4018
4019 /* loop through and disable scalers that aren't in use */ 4020 for (i = 0; i < intel_crtc->num_scalers; i++) { 4021 if (!scaler_state->scalers[i].in_use) 4022 skl_detach_scaler(intel_crtc, i); 4023 } 4024 } 4025
4026 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, 4027 int color_plane, unsigned int rotation) 4028 { 4029 /* 4030 * The stride is either expressed in chunks of 64 bytes for 4031 * linear buffers or in a number of tiles for tiled buffers. 4032 */ 4033 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) 4034 return 64; 4035 else if (drm_rotation_90_or_270(rotation)) 4036 return intel_tile_height(fb, color_plane); 4037 else 4038 return intel_tile_width_bytes(fb, color_plane); 4039 } 4040
4041 u32 skl_plane_stride(const struct intel_plane_state *plane_state, 4042 int color_plane) 4043 { 4044 const struct drm_framebuffer *fb = plane_state->base.fb; 4045 unsigned int rotation = plane_state->base.rotation; 4046 u32 stride = plane_state->color_plane[color_plane].stride; 4047
4048 if (color_plane >= fb->format->num_planes) 4049 return 0; 4050 4051 return stride / skl_plane_stride_mult(fb, color_plane, rotation); 4052 } 4053
4054 static u32 skl_plane_ctl_format(u32 pixel_format) 4055 { 4056 switch (pixel_format) { 4057 case DRM_FORMAT_C8: 4058 return PLANE_CTL_FORMAT_INDEXED; 4059 case DRM_FORMAT_RGB565: 4060 return PLANE_CTL_FORMAT_RGB_565;
4061 case DRM_FORMAT_XBGR8888: 4062 case DRM_FORMAT_ABGR8888: 4063 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; 4064 case DRM_FORMAT_XRGB8888: 4065 case DRM_FORMAT_ARGB8888: 4066 return PLANE_CTL_FORMAT_XRGB_8888;
4067 case DRM_FORMAT_XBGR2101010: 4068 return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX; 4069 case DRM_FORMAT_XRGB2101010: 4070 return PLANE_CTL_FORMAT_XRGB_2101010;
4071 case DRM_FORMAT_XBGR16161616F: 4072 case DRM_FORMAT_ABGR16161616F: 4073 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; 4074 case DRM_FORMAT_XRGB16161616F: 4075 case DRM_FORMAT_ARGB16161616F: 4076 return PLANE_CTL_FORMAT_XRGB_16161616F;
4077 case DRM_FORMAT_YUYV: 4078 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; 4079 case DRM_FORMAT_YVYU: 4080 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; 4081 case DRM_FORMAT_UYVY: 4082 return PLANE_CTL_FORMAT_YUV422 |
PLANE_CTL_YUV422_UYVY; 4083 case DRM_FORMAT_VYUY: 4084 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
4085 case DRM_FORMAT_NV12: 4086 return PLANE_CTL_FORMAT_NV12; 4087 case DRM_FORMAT_P010: 4088 return PLANE_CTL_FORMAT_P010; 4089 case DRM_FORMAT_P012: 4090 return PLANE_CTL_FORMAT_P012; 4091 case DRM_FORMAT_P016: 4092 return PLANE_CTL_FORMAT_P016;
4093 case DRM_FORMAT_Y210: 4094 return PLANE_CTL_FORMAT_Y210; 4095 case DRM_FORMAT_Y212: 4096 return PLANE_CTL_FORMAT_Y212; 4097 case DRM_FORMAT_Y216: 4098 return PLANE_CTL_FORMAT_Y216;
4099 case DRM_FORMAT_XVYU2101010: 4100 return PLANE_CTL_FORMAT_Y410; 4101 case DRM_FORMAT_XVYU12_16161616: 4102 return PLANE_CTL_FORMAT_Y412; 4103 case DRM_FORMAT_XVYU16161616: 4104 return PLANE_CTL_FORMAT_Y416;
4105 default: 4106 MISSING_CASE(pixel_format); 4107 } 4108 4109 return 0; 4110 } 4111
4112 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) 4113 { 4114 if (!plane_state->base.fb->format->has_alpha) 4115 return PLANE_CTL_ALPHA_DISABLE; 4116
4117 switch (plane_state->base.pixel_blend_mode) { 4118 case DRM_MODE_BLEND_PIXEL_NONE: 4119 return PLANE_CTL_ALPHA_DISABLE; 4120 case DRM_MODE_BLEND_PREMULTI: 4121 return PLANE_CTL_ALPHA_SW_PREMULTIPLY; 4122 case DRM_MODE_BLEND_COVERAGE: 4123 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4124 default: 4125 MISSING_CASE(plane_state->base.pixel_blend_mode); 4126 return PLANE_CTL_ALPHA_DISABLE; 4127 } 4128 } 4129
4130 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state) 4131 { 4132 if (!plane_state->base.fb->format->has_alpha) 4133 return PLANE_COLOR_ALPHA_DISABLE; 4134
4135 switch (plane_state->base.pixel_blend_mode) { 4136 case DRM_MODE_BLEND_PIXEL_NONE: 4137 return PLANE_COLOR_ALPHA_DISABLE; 4138 case DRM_MODE_BLEND_PREMULTI: 4139 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY; 4140 case DRM_MODE_BLEND_COVERAGE: 4141 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4142 default: 4143 MISSING_CASE(plane_state->base.pixel_blend_mode); 4144 return PLANE_COLOR_ALPHA_DISABLE; 4145 } 4146 } 4147
4148 static u32 skl_plane_ctl_tiling(u64 fb_modifier) 4149 { 4150 switch (fb_modifier) { 4151 case DRM_FORMAT_MOD_LINEAR: 4152 break; 4153 case I915_FORMAT_MOD_X_TILED: 4154 return PLANE_CTL_TILED_X; 4155 case I915_FORMAT_MOD_Y_TILED: 4156 return PLANE_CTL_TILED_Y;
4157 case I915_FORMAT_MOD_Y_TILED_CCS: 4158 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; 4159 case I915_FORMAT_MOD_Yf_TILED: 4160 return PLANE_CTL_TILED_YF; 4161 case I915_FORMAT_MOD_Yf_TILED_CCS: 4162 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4163 default: 4164 MISSING_CASE(fb_modifier); 4165 } 4166 4167 return 0; 4168 } 4169
4170 static u32 skl_plane_ctl_rotate(unsigned int rotate) 4171 { 4172 switch (rotate) { 4173 case DRM_MODE_ROTATE_0: 4174 break; 4175 /* 4176 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr 4177 * while i915 HW rotation is clockwise, hence the swapped values below.
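 * For example (illustrative note, not in the original comment): a
 * DRM_MODE_ROTATE_90 request (90 degrees counter-clockwise) is
 * programmed below as PLANE_CTL_ROTATE_270 (270 degrees clockwise),
 * which is the same physical orientation.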
4178 */ 4179 case DRM_MODE_ROTATE_90: 4180 return PLANE_CTL_ROTATE_270; 4181 case DRM_MODE_ROTATE_180: 4182 return PLANE_CTL_ROTATE_180; 4183 case DRM_MODE_ROTATE_270: 4184 return PLANE_CTL_ROTATE_90; 4185 default: 4186 MISSING_CASE(rotate); 4187 } 4188 4189 return 0; 4190 } 4191 4192 static u32 cnl_plane_ctl_flip(unsigned int reflect) 4193 { 4194 switch (reflect) { 4195 case 0: 4196 break; 4197 case DRM_MODE_REFLECT_X: 4198 return PLANE_CTL_FLIP_HORIZONTAL; 4199 case DRM_MODE_REFLECT_Y: 4200 default: 4201 MISSING_CASE(reflect); 4202 } 4203 4204 return 0; 4205 } 4206 4207 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 4208 { 4209 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 4210 u32 plane_ctl = 0; 4211 4212 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 4213 return plane_ctl; 4214 4215 if (crtc_state->gamma_enable) 4216 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE; 4217 4218 if (crtc_state->csc_enable) 4219 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; 4220 4221 return plane_ctl; 4222 } 4223 4224 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, 4225 const struct intel_plane_state *plane_state) 4226 { 4227 struct drm_i915_private *dev_priv = 4228 to_i915(plane_state->base.plane->dev); 4229 const struct drm_framebuffer *fb = plane_state->base.fb; 4230 unsigned int rotation = plane_state->base.rotation; 4231 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 4232 u32 plane_ctl; 4233 4234 plane_ctl = PLANE_CTL_ENABLE; 4235 4236 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { 4237 plane_ctl |= skl_plane_ctl_alpha(plane_state); 4238 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; 4239 4240 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) 4241 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709; 4242 4243 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 4244 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE; 4245 } 4246 4247 plane_ctl |= skl_plane_ctl_format(fb->format->format); 4248 plane_ctl |= skl_plane_ctl_tiling(fb->modifier); 4249 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK); 4250 4251 if (INTEL_GEN(dev_priv) >= 10) 4252 plane_ctl |= cnl_plane_ctl_flip(rotation & 4253 DRM_MODE_REFLECT_MASK); 4254 4255 if (key->flags & I915_SET_COLORKEY_DESTINATION) 4256 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; 4257 else if (key->flags & I915_SET_COLORKEY_SOURCE) 4258 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; 4259 4260 return plane_ctl; 4261 } 4262 4263 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) 4264 { 4265 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 4266 u32 plane_color_ctl = 0; 4267 4268 if (INTEL_GEN(dev_priv) >= 11) 4269 return plane_color_ctl; 4270 4271 if (crtc_state->gamma_enable) 4272 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; 4273 4274 if (crtc_state->csc_enable) 4275 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; 4276 4277 return plane_color_ctl; 4278 } 4279 4280 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, 4281 const struct intel_plane_state *plane_state) 4282 { 4283 struct drm_i915_private *dev_priv = 4284 to_i915(plane_state->base.plane->dev); 4285 const struct drm_framebuffer *fb = plane_state->base.fb; 4286 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 4287 u32 plane_color_ctl = 0; 4288 4289 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; 4290 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); 4291 4292 if (fb->format->is_yuv && 
!icl_is_hdr_plane(dev_priv, plane->id)) { 4293 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) 4294 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; 4295 else 4296 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709; 4297 4298 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 4299 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; 4300 } else if (fb->format->is_yuv) { 4301 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; 4302 } 4303 4304 return plane_color_ctl; 4305 } 4306 4307 static int 4308 __intel_display_resume(struct drm_device *dev, 4309 struct drm_atomic_state *state, 4310 struct drm_modeset_acquire_ctx *ctx) 4311 { 4312 struct drm_crtc_state *crtc_state; 4313 struct drm_crtc *crtc; 4314 int i, ret; 4315 4316 intel_modeset_setup_hw_state(dev, ctx); 4317 intel_vga_redisable(to_i915(dev)); 4318 4319 if (!state) 4320 return 0; 4321 4322 /* 4323 * We've duplicated the state, pointers to the old state are invalid. 4324 * 4325 * Don't attempt to use the old state until we commit the duplicated state. 4326 */ 4327 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 4328 /* 4329 * Force recalculation even if we restore 4330 * current state. With fast modeset this may not result 4331 * in a modeset when the state is compatible. 4332 */ 4333 crtc_state->mode_changed = true; 4334 } 4335 4336 /* ignore any reset values/BIOS leftovers in the WM registers */ 4337 if (!HAS_GMCH(to_i915(dev))) 4338 to_intel_atomic_state(state)->skip_intermediate_wm = true; 4339 4340 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 4341 4342 WARN_ON(ret == -EDEADLK); 4343 return ret; 4344 } 4345 4346 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 4347 { 4348 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && 4349 intel_has_gpu_reset(&dev_priv->gt)); 4350 } 4351 4352 void intel_prepare_reset(struct drm_i915_private *dev_priv) 4353 { 4354 struct drm_device *dev = &dev_priv->drm; 4355 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 4356 struct drm_atomic_state *state; 4357 int ret; 4358 4359 /* reset doesn't touch the display */ 4360 if (!i915_modparams.force_reset_modeset_test && 4361 !gpu_reset_clobbers_display(dev_priv)) 4362 return; 4363 4364 /* We have a modeset vs reset deadlock, defensively unbreak it. */ 4365 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 4366 smp_mb__after_atomic(); 4367 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET); 4368 4369 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { 4370 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n"); 4371 intel_gt_set_wedged(&dev_priv->gt); 4372 } 4373 4374 /* 4375 * Need mode_config.mutex so that we don't 4376 * trample ongoing ->detect() and whatnot. 4377 */ 4378 mutex_lock(&dev->mode_config.mutex); 4379 drm_modeset_acquire_init(ctx, 0); 4380 while (1) { 4381 ret = drm_modeset_lock_all_ctx(dev, ctx); 4382 if (ret != -EDEADLK) 4383 break; 4384 4385 drm_modeset_backoff(ctx); 4386 } 4387 /* 4388 * Disabling the crtcs gracefully seems nicer. Also the 4389 * g33 docs say we should at least disable all the planes. 
4390 */ 4391 state = drm_atomic_helper_duplicate_state(dev, ctx); 4392 if (IS_ERR(state)) { 4393 ret = PTR_ERR(state); 4394 DRM_ERROR("Duplicating state failed with %i\n", ret); 4395 return; 4396 } 4397
4398 ret = drm_atomic_helper_disable_all(dev, ctx); 4399 if (ret) { 4400 DRM_ERROR("Suspending CRTCs failed with %i\n", ret); 4401 drm_atomic_state_put(state); 4402 return; 4403 } 4404
4405 dev_priv->modeset_restore_state = state; 4406 state->acquire_ctx = ctx; 4407 } 4408
4409 void intel_finish_reset(struct drm_i915_private *dev_priv) 4410 { 4411 struct drm_device *dev = &dev_priv->drm; 4412 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 4413 struct drm_atomic_state *state; 4414 int ret; 4415
4416 /* reset doesn't touch the display */ 4417 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) 4418 return; 4419
4420 state = fetch_and_zero(&dev_priv->modeset_restore_state); 4421 if (!state) 4422 goto unlock; 4423
4424 /* reset doesn't touch the display */ 4425 if (!gpu_reset_clobbers_display(dev_priv)) { 4426 /* for testing, only restore the display */ 4427 ret = __intel_display_resume(dev, state, ctx); 4428 if (ret) 4429 DRM_ERROR("Restoring old state failed with %i\n", ret);
4430 } else { 4431 /* 4432 * The display has been reset as well, 4433 * so we need a full re-initialization. 4434 */ 4435 intel_pps_unlock_regs_wa(dev_priv); 4436 intel_modeset_init_hw(dev_priv); 4437 intel_init_clock_gating(dev_priv); 4438
4439 spin_lock_irq(&dev_priv->irq_lock); 4440 if (dev_priv->display.hpd_irq_setup) 4441 dev_priv->display.hpd_irq_setup(dev_priv); 4442 spin_unlock_irq(&dev_priv->irq_lock); 4443
4444 ret = __intel_display_resume(dev, state, ctx); 4445 if (ret) 4446 DRM_ERROR("Restoring old state failed with %i\n", ret); 4447 4448 intel_hpd_init(dev_priv); 4449 } 4450
4451 drm_atomic_state_put(state); 4452 unlock: 4453 drm_modeset_drop_locks(ctx); 4454 drm_modeset_acquire_fini(ctx); 4455 mutex_unlock(&dev->mode_config.mutex); 4456
4457 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 4458 } 4459
4460 static void icl_set_pipe_chicken(struct intel_crtc *crtc) 4461 { 4462 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4463 enum pipe pipe = crtc->pipe; 4464 u32 tmp; 4465
4466 tmp = I915_READ(PIPE_CHICKEN(pipe)); 4467 4468 /* 4469 * Display WA #1153: icl 4470 * enable hardware to bypass the alpha math 4471 * and rounding for per-pixel values 00 and 0xff 4472 */ 4473 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4474 /* 4475 * Display WA #1605353570: icl 4476 * Set the pixel rounding bit to 1 to allow 4477 * passthrough of frame buffer pixels unmodified 4478 * across the pipe 4479 */ 4480 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; 4481 I915_WRITE(PIPE_CHICKEN(pipe), tmp); 4482 } 4483
4484 static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state) 4485 { 4486 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 4487 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4488 u32 trans_ddi_func_ctl2_val; 4489 u8 master_select; 4490
4491 /* 4492 * Configure the master select and enable Transcoder Port Sync for 4493 * the slave CRTC's transcoder.
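 * (Editorial note, derived from the code below: TRANSCODER_EDP maps to
 * master_select 0, while any other master transcoder maps to its enum
 * value + 1.)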
4494 */ 4495 if (crtc_state->master_transcoder == INVALID_TRANSCODER) 4496 return; 4497
4498 if (crtc_state->master_transcoder == TRANSCODER_EDP) 4499 master_select = 0; 4500 else 4501 master_select = crtc_state->master_transcoder + 1; 4502
4503 /* Set the master select bits for Transcoder Port Sync */ 4504 trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) & 4505 PORT_SYNC_MODE_MASTER_SELECT_MASK) << 4506 PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
4507 /* Enable Transcoder Port Sync */ 4508 trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE; 4509
4510 I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder), 4511 trans_ddi_func_ctl2_val); 4512 } 4513
4514 static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state) 4515 { 4516 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 4517 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4518 i915_reg_t reg; 4519 u32 trans_ddi_func_ctl2_val; 4520
4521 if (old_crtc_state->master_transcoder == INVALID_TRANSCODER) 4522 return; 4523
4524 DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n", 4525 transcoder_name(old_crtc_state->cpu_transcoder)); 4526
4527 reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder); 4528 trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE | 4529 PORT_SYNC_MODE_MASTER_SELECT_MASK); 4530 I915_WRITE(reg, trans_ddi_func_ctl2_val); 4531 } 4532
4533 static void intel_fdi_normal_train(struct intel_crtc *crtc) 4534 { 4535 struct drm_device *dev = crtc->base.dev; 4536 struct drm_i915_private *dev_priv = to_i915(dev); 4537 enum pipe pipe = crtc->pipe; 4538 i915_reg_t reg; 4539 u32 temp; 4540
4541 /* enable normal train */ 4542 reg = FDI_TX_CTL(pipe); 4543 temp = I915_READ(reg); 4544 if (IS_IVYBRIDGE(dev_priv)) { 4545 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 4546 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
4547 } else { 4548 temp &= ~FDI_LINK_TRAIN_NONE; 4549 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 4550 } 4551 I915_WRITE(reg, temp); 4552
4553 reg = FDI_RX_CTL(pipe); 4554 temp = I915_READ(reg); 4555 if (HAS_PCH_CPT(dev_priv)) { 4556 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4557 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
4558 } else { 4559 temp &= ~FDI_LINK_TRAIN_NONE; 4560 temp |= FDI_LINK_TRAIN_NONE; 4561 } 4562 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 4563
4564 /* wait one idle pattern time */ 4565 POSTING_READ(reg); 4566 udelay(1000); 4567
4568 /* IVB wants error correction enabled */ 4569 if (IS_IVYBRIDGE(dev_priv)) 4570 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 4571 FDI_FE_ERRC_ENABLE); 4572 } 4573
4574 /* The FDI link training functions for ILK/Ibexpeak.
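 * Summary of the sequence below (editorial note, derived from the code):
 * enable CPU FDI TX and PCH FDI RX with training pattern 1, poll
 * FDI_RX_IIR for bit lock, then switch both sides to pattern 2 and
 * poll for symbol lock.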
*/ 4575 static void ironlake_fdi_link_train(struct intel_crtc *crtc, 4576 const struct intel_crtc_state *crtc_state) 4577 { 4578 struct drm_device *dev = crtc->base.dev; 4579 struct drm_i915_private *dev_priv = to_i915(dev); 4580 enum pipe pipe = crtc->pipe; 4581 i915_reg_t reg; 4582 u32 temp, tries; 4583
4584 /* FDI needs bits from pipe first */ 4585 assert_pipe_enabled(dev_priv, pipe); 4586
4587 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits 4588 for train result */ 4589 reg = FDI_RX_IMR(pipe); 4590 temp = I915_READ(reg); 4591 temp &= ~FDI_RX_SYMBOL_LOCK; 4592 temp &= ~FDI_RX_BIT_LOCK; 4593 I915_WRITE(reg, temp); 4594 I915_READ(reg); 4595 udelay(150); 4596
4597 /* enable CPU FDI TX and PCH FDI RX */ 4598 reg = FDI_TX_CTL(pipe); 4599 temp = I915_READ(reg); 4600 temp &= ~FDI_DP_PORT_WIDTH_MASK; 4601 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4602 temp &= ~FDI_LINK_TRAIN_NONE; 4603 temp |= FDI_LINK_TRAIN_PATTERN_1; 4604 I915_WRITE(reg, temp | FDI_TX_ENABLE); 4605
4606 reg = FDI_RX_CTL(pipe); 4607 temp = I915_READ(reg); 4608 temp &= ~FDI_LINK_TRAIN_NONE; 4609 temp |= FDI_LINK_TRAIN_PATTERN_1; 4610 I915_WRITE(reg, temp | FDI_RX_ENABLE); 4611
4612 POSTING_READ(reg); 4613 udelay(150); 4614
4615 /* Ironlake workaround, enable clock pointer after FDI enable */ 4616 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 4617 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 4618 FDI_RX_PHASE_SYNC_POINTER_EN); 4619
4620 reg = FDI_RX_IIR(pipe); 4621 for (tries = 0; tries < 5; tries++) { 4622 temp = I915_READ(reg); 4623 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4624
4625 if ((temp & FDI_RX_BIT_LOCK)) { 4626 DRM_DEBUG_KMS("FDI train 1 done.\n"); 4627 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 4628 break; 4629 } 4630 } 4631 if (tries == 5) 4632 DRM_ERROR("FDI train 1 fail!\n"); 4633
4634 /* Train 2 */ 4635 reg = FDI_TX_CTL(pipe); 4636 temp = I915_READ(reg); 4637 temp &= ~FDI_LINK_TRAIN_NONE; 4638 temp |= FDI_LINK_TRAIN_PATTERN_2; 4639 I915_WRITE(reg, temp); 4640
4641 reg = FDI_RX_CTL(pipe); 4642 temp = I915_READ(reg); 4643 temp &= ~FDI_LINK_TRAIN_NONE; 4644 temp |= FDI_LINK_TRAIN_PATTERN_2; 4645 I915_WRITE(reg, temp); 4646
4647 POSTING_READ(reg); 4648 udelay(150); 4649
4650 reg = FDI_RX_IIR(pipe); 4651 for (tries = 0; tries < 5; tries++) { 4652 temp = I915_READ(reg); 4653 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4654
4655 if (temp & FDI_RX_SYMBOL_LOCK) { 4656 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 4657 DRM_DEBUG_KMS("FDI train 2 done.\n"); 4658 break; 4659 } 4660 } 4661 if (tries == 5) 4662 DRM_ERROR("FDI train 2 fail!\n"); 4663
4664 DRM_DEBUG_KMS("FDI train done\n"); 4665 4666 } 4667
4668 static const int snb_b_fdi_train_param[] = { 4669 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 4670 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 4671 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 4672 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 4673 }; 4674
4675 /* The FDI link training functions for SNB/Cougarpoint.
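 * Summary (editorial note, derived from the code below): the same
 * pattern 1 / pattern 2 sequence as on ILK, but each pattern is
 * retried across the four vswing/pre-emphasis levels in
 * snb_b_fdi_train_param[] until bit/symbol lock is reported.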
*/ 4676 static void gen6_fdi_link_train(struct intel_crtc *crtc, 4677 const struct intel_crtc_state *crtc_state) 4678 { 4679 struct drm_device *dev = crtc->base.dev; 4680 struct drm_i915_private *dev_priv = to_i915(dev); 4681 enum pipe pipe = crtc->pipe; 4682 i915_reg_t reg; 4683 u32 temp, i, retry; 4684
4685 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits 4686 for train result */ 4687 reg = FDI_RX_IMR(pipe); 4688 temp = I915_READ(reg); 4689 temp &= ~FDI_RX_SYMBOL_LOCK; 4690 temp &= ~FDI_RX_BIT_LOCK; 4691 I915_WRITE(reg, temp); 4692
4693 POSTING_READ(reg); 4694 udelay(150); 4695
4696 /* enable CPU FDI TX and PCH FDI RX */ 4697 reg = FDI_TX_CTL(pipe); 4698 temp = I915_READ(reg); 4699 temp &= ~FDI_DP_PORT_WIDTH_MASK; 4700 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4701 temp &= ~FDI_LINK_TRAIN_NONE; 4702 temp |= FDI_LINK_TRAIN_PATTERN_1; 4703 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4704 /* SNB-B */ 4705 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 4706 I915_WRITE(reg, temp | FDI_TX_ENABLE); 4707
4708 I915_WRITE(FDI_RX_MISC(pipe), 4709 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 4710
4711 reg = FDI_RX_CTL(pipe); 4712 temp = I915_READ(reg); 4713 if (HAS_PCH_CPT(dev_priv)) { 4714 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4715 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 4716 } else { 4717 temp &= ~FDI_LINK_TRAIN_NONE; 4718 temp |= FDI_LINK_TRAIN_PATTERN_1; 4719 } 4720 I915_WRITE(reg, temp | FDI_RX_ENABLE); 4721
4722 POSTING_READ(reg); 4723 udelay(150); 4724
4725 for (i = 0; i < 4; i++) { 4726 reg = FDI_TX_CTL(pipe); 4727 temp = I915_READ(reg); 4728 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4729 temp |= snb_b_fdi_train_param[i]; 4730 I915_WRITE(reg, temp); 4731
4732 POSTING_READ(reg); 4733 udelay(500); 4734
4735 for (retry = 0; retry < 5; retry++) { 4736 reg = FDI_RX_IIR(pipe); 4737 temp = I915_READ(reg); 4738 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4739 if (temp & FDI_RX_BIT_LOCK) { 4740 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 4741 DRM_DEBUG_KMS("FDI train 1 done.\n"); 4742 break; 4743 } 4744 udelay(50); 4745 } 4746 if (retry < 5) 4747 break; 4748 } 4749 if (i == 4) 4750 DRM_ERROR("FDI train 1 fail!\n"); 4751
4752 /* Train 2 */ 4753 reg = FDI_TX_CTL(pipe); 4754 temp = I915_READ(reg); 4755 temp &= ~FDI_LINK_TRAIN_NONE; 4756 temp |= FDI_LINK_TRAIN_PATTERN_2; 4757 if (IS_GEN(dev_priv, 6)) { 4758 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4759 /* SNB-B */ 4760 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 4761 } 4762 I915_WRITE(reg, temp); 4763
4764 reg = FDI_RX_CTL(pipe); 4765 temp = I915_READ(reg); 4766 if (HAS_PCH_CPT(dev_priv)) { 4767 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4768 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 4769 } else { 4770 temp &= ~FDI_LINK_TRAIN_NONE; 4771 temp |= FDI_LINK_TRAIN_PATTERN_2; 4772 } 4773 I915_WRITE(reg, temp); 4774
4775 POSTING_READ(reg); 4776 udelay(150); 4777
4778 for (i = 0; i < 4; i++) { 4779 reg = FDI_TX_CTL(pipe); 4780 temp = I915_READ(reg); 4781 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4782 temp |= snb_b_fdi_train_param[i]; 4783 I915_WRITE(reg, temp); 4784
4785 POSTING_READ(reg); 4786 udelay(500); 4787
4788 for (retry = 0; retry < 5; retry++) { 4789 reg = FDI_RX_IIR(pipe); 4790 temp = I915_READ(reg); 4791 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4792 if (temp & FDI_RX_SYMBOL_LOCK) { 4793 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 4794 DRM_DEBUG_KMS("FDI train 2 done.\n"); 4795 break; 4796 } 4797 udelay(50); 4798 } 4799 if (retry < 5) 4800 break; 4801 } 4802 if (i == 4) 4803 DRM_ERROR("FDI train 2 fail!\n"); 4804
4805 DRM_DEBUG_KMS("FDI train done.\n"); 4806 }
4807 4808 /* Manual link training for Ivy Bridge A0 parts */ 4809 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, 4810 const struct intel_crtc_state *crtc_state) 4811 { 4812 struct drm_device *dev = crtc->base.dev; 4813 struct drm_i915_private *dev_priv = to_i915(dev); 4814 enum pipe pipe = crtc->pipe; 4815 i915_reg_t reg; 4816 u32 temp, i, j; 4817
4818 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits 4819 for train result */ 4820 reg = FDI_RX_IMR(pipe); 4821 temp = I915_READ(reg); 4822 temp &= ~FDI_RX_SYMBOL_LOCK; 4823 temp &= ~FDI_RX_BIT_LOCK; 4824 I915_WRITE(reg, temp); 4825
4826 POSTING_READ(reg); 4827 udelay(150); 4828
4829 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", 4830 I915_READ(FDI_RX_IIR(pipe))); 4831
4832 /* Try each vswing and preemphasis setting twice before moving on */ 4833 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { 4834 /* disable first in case we need to retry */ 4835 reg = FDI_TX_CTL(pipe); 4836 temp = I915_READ(reg); 4837 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 4838 temp &= ~FDI_TX_ENABLE; 4839 I915_WRITE(reg, temp); 4840
4841 reg = FDI_RX_CTL(pipe); 4842 temp = I915_READ(reg); 4843 temp &= ~FDI_LINK_TRAIN_AUTO; 4844 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4845 temp &= ~FDI_RX_ENABLE; 4846 I915_WRITE(reg, temp); 4847
4848 /* enable CPU FDI TX and PCH FDI RX */ 4849 reg = FDI_TX_CTL(pipe); 4850 temp = I915_READ(reg); 4851 temp &= ~FDI_DP_PORT_WIDTH_MASK; 4852 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4853 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 4854 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4855 temp |= snb_b_fdi_train_param[j/2]; 4856 temp |= FDI_COMPOSITE_SYNC; 4857 I915_WRITE(reg, temp | FDI_TX_ENABLE); 4858
4859 I915_WRITE(FDI_RX_MISC(pipe), 4860 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 4861
4862 reg = FDI_RX_CTL(pipe); 4863 temp = I915_READ(reg); 4864 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 4865 temp |= FDI_COMPOSITE_SYNC; 4866 I915_WRITE(reg, temp | FDI_RX_ENABLE); 4867
4868 POSTING_READ(reg); 4869 udelay(1); /* should be 0.5us */ 4870
4871 for (i = 0; i < 4; i++) { 4872 reg = FDI_RX_IIR(pipe); 4873 temp = I915_READ(reg); 4874 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4875
4876 if (temp & FDI_RX_BIT_LOCK || 4877 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 4878 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 4879 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", 4880 i); 4881 break; 4882 } 4883 udelay(1); /* should be 0.5us */ 4884 } 4885 if (i == 4) { 4886 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); 4887 continue; 4888 } 4889
4890 /* Train 2 */ 4891 reg = FDI_TX_CTL(pipe); 4892 temp = I915_READ(reg); 4893 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 4894 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 4895 I915_WRITE(reg, temp); 4896
4897 reg = FDI_RX_CTL(pipe); 4898 temp = I915_READ(reg); 4899 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4900 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 4901 I915_WRITE(reg, temp); 4902
4903 POSTING_READ(reg); 4904 udelay(2); /* should be 1.5us */ 4905
4906 for (i = 0; i < 4; i++) { 4907 reg = FDI_RX_IIR(pipe); 4908 temp = I915_READ(reg); 4909 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4910
4911 if (temp & FDI_RX_SYMBOL_LOCK || 4912 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { 4913 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 4914 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", 4915 i); 4916 goto train_done; 4917 } 4918 udelay(2); /* should be 1.5us */ 4919 } 4920 if (i == 4) 4921 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); 4922 } 4923
4924 train_done: 4925 DRM_DEBUG_KMS("FDI
train done.\n"); 4926 } 4927 4928 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) 4929 { 4930 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 4931 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 4932 enum pipe pipe = intel_crtc->pipe; 4933 i915_reg_t reg; 4934 u32 temp; 4935 4936 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 4937 reg = FDI_RX_CTL(pipe); 4938 temp = I915_READ(reg); 4939 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 4940 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4941 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4942 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 4943 4944 POSTING_READ(reg); 4945 udelay(200); 4946 4947 /* Switch from Rawclk to PCDclk */ 4948 temp = I915_READ(reg); 4949 I915_WRITE(reg, temp | FDI_PCDCLK); 4950 4951 POSTING_READ(reg); 4952 udelay(200); 4953 4954 /* Enable CPU FDI TX PLL, always on for Ironlake */ 4955 reg = FDI_TX_CTL(pipe); 4956 temp = I915_READ(reg); 4957 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 4958 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 4959 4960 POSTING_READ(reg); 4961 udelay(100); 4962 } 4963 } 4964 4965 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 4966 { 4967 struct drm_device *dev = intel_crtc->base.dev; 4968 struct drm_i915_private *dev_priv = to_i915(dev); 4969 enum pipe pipe = intel_crtc->pipe; 4970 i915_reg_t reg; 4971 u32 temp; 4972 4973 /* Switch from PCDclk to Rawclk */ 4974 reg = FDI_RX_CTL(pipe); 4975 temp = I915_READ(reg); 4976 I915_WRITE(reg, temp & ~FDI_PCDCLK); 4977 4978 /* Disable CPU FDI TX PLL */ 4979 reg = FDI_TX_CTL(pipe); 4980 temp = I915_READ(reg); 4981 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 4982 4983 POSTING_READ(reg); 4984 udelay(100); 4985 4986 reg = FDI_RX_CTL(pipe); 4987 temp = I915_READ(reg); 4988 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 4989 4990 /* Wait for the clocks to turn off. 
*/ 4991 POSTING_READ(reg); 4992 udelay(100); 4993 } 4994
4995 static void ironlake_fdi_disable(struct drm_crtc *crtc) 4996 { 4997 struct drm_device *dev = crtc->dev; 4998 struct drm_i915_private *dev_priv = to_i915(dev); 4999 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5000 enum pipe pipe = intel_crtc->pipe; 5001 i915_reg_t reg; 5002 u32 temp; 5003
5004 /* disable CPU FDI tx and PCH FDI rx */ 5005 reg = FDI_TX_CTL(pipe); 5006 temp = I915_READ(reg); 5007 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 5008 POSTING_READ(reg); 5009
5010 reg = FDI_RX_CTL(pipe); 5011 temp = I915_READ(reg); 5012 temp &= ~(0x7 << 16); 5013 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 5014 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 5015
5016 POSTING_READ(reg); 5017 udelay(100); 5018
5019 /* Ironlake workaround, disable clock pointer after downing FDI */ 5020 if (HAS_PCH_IBX(dev_priv)) 5021 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 5022
5023 /* still set train pattern 1 */ 5024 reg = FDI_TX_CTL(pipe); 5025 temp = I915_READ(reg); 5026 temp &= ~FDI_LINK_TRAIN_NONE; 5027 temp |= FDI_LINK_TRAIN_PATTERN_1; 5028 I915_WRITE(reg, temp); 5029
5030 reg = FDI_RX_CTL(pipe); 5031 temp = I915_READ(reg); 5032 if (HAS_PCH_CPT(dev_priv)) { 5033 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 5034 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 5035 } else { 5036 temp &= ~FDI_LINK_TRAIN_NONE; 5037 temp |= FDI_LINK_TRAIN_PATTERN_1; 5038 } 5039 /* BPC in FDI rx is consistent with that in PIPECONF */ 5040 temp &= ~(0x07 << 16); 5041 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 5042 I915_WRITE(reg, temp); 5043
5044 POSTING_READ(reg); 5045 udelay(100); 5046 } 5047
5048 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) 5049 { 5050 struct drm_crtc *crtc; 5051 bool cleanup_done; 5052
5053 drm_for_each_crtc(crtc, &dev_priv->drm) { 5054 struct drm_crtc_commit *commit; 5055 spin_lock(&crtc->commit_lock); 5056 commit = list_first_entry_or_null(&crtc->commit_list, 5057 struct drm_crtc_commit, commit_entry); 5058 cleanup_done = commit ? 5059 try_wait_for_completion(&commit->cleanup_done) : true; 5060 spin_unlock(&crtc->commit_lock); 5061
5062 if (cleanup_done) 5063 continue; 5064 5065 drm_crtc_wait_one_vblank(crtc); 5066 5067 return true; 5068 } 5069 5070 return false; 5071 } 5072
5073 void lpt_disable_iclkip(struct drm_i915_private *dev_priv) 5074 { 5075 u32 temp; 5076 5077 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); 5078 5079 mutex_lock(&dev_priv->sb_lock); 5080
5081 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5082 temp |= SBI_SSCCTL_DISABLE; 5083 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 5084 5085 mutex_unlock(&dev_priv->sb_lock); 5086 } 5087
5088 /* Program iCLKIP clock to the desired frequency */ 5089 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) 5090 { 5091 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5092 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5093 int clock = crtc_state->base.adjusted_mode.crtc_clock; 5094 u32 divsel, phaseinc, auxdiv, phasedir = 0; 5095 u32 temp; 5096
5097 lpt_disable_iclkip(dev_priv); 5098
5099 /* The iCLK virtual clock root frequency is in MHz, 5100 * but the adjusted_mode->crtc_clock is in kHz. To get the 5101 * divisors, it is necessary to divide one by the other, so we 5102 * convert the virtual clock precision to kHz here for higher 5103 * precision.
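 *
 * Worked example (illustrative numbers, not from the original comment):
 * for a 108000 kHz crtc_clock with auxdiv=0,
 * desired_divisor = DIV_ROUND_CLOSEST(172800000, 108000) = 1600,
 * so divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0;
 * divsel fits the 7-bit limit, so the auxdiv loop below terminates.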
5104 */ 5105 for (auxdiv = 0; auxdiv < 2; auxdiv++) { 5106 u32 iclk_virtual_root_freq = 172800 * 1000; 5107 u32 iclk_pi_range = 64; 5108 u32 desired_divisor; 5109 5110 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5111 clock << auxdiv); 5112 divsel = (desired_divisor / iclk_pi_range) - 2; 5113 phaseinc = desired_divisor % iclk_pi_range; 5114 5115 /* 5116 * Near 20MHz is a corner case which is 5117 * out of range for the 7-bit divisor 5118 */ 5119 if (divsel <= 0x7f) 5120 break; 5121 } 5122 5123 /* This should not happen with any sane values */ 5124 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) & 5125 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); 5126 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) & 5127 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 5128 5129 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 5130 clock, 5131 auxdiv, 5132 divsel, 5133 phasedir, 5134 phaseinc); 5135 5136 mutex_lock(&dev_priv->sb_lock); 5137 5138 /* Program SSCDIVINTPHASE6 */ 5139 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5140 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 5141 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 5142 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 5143 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 5144 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 5145 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 5146 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); 5147 5148 /* Program SSCAUXDIV */ 5149 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5150 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 5151 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 5152 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); 5153 5154 /* Enable modulator and associated divider */ 5155 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5156 temp &= ~SBI_SSCCTL_DISABLE; 5157 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 5158 5159 mutex_unlock(&dev_priv->sb_lock); 5160 5161 /* Wait for initialization time */ 5162 udelay(24); 5163 5164 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); 5165 } 5166 5167 int lpt_get_iclkip(struct drm_i915_private *dev_priv) 5168 { 5169 u32 divsel, phaseinc, auxdiv; 5170 u32 iclk_virtual_root_freq = 172800 * 1000; 5171 u32 iclk_pi_range = 64; 5172 u32 desired_divisor; 5173 u32 temp; 5174 5175 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) 5176 return 0; 5177 5178 mutex_lock(&dev_priv->sb_lock); 5179 5180 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5181 if (temp & SBI_SSCCTL_DISABLE) { 5182 mutex_unlock(&dev_priv->sb_lock); 5183 return 0; 5184 } 5185 5186 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5187 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> 5188 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; 5189 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> 5190 SBI_SSCDIVINTPHASE_INCVAL_SHIFT; 5191 5192 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5193 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> 5194 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; 5195 5196 mutex_unlock(&dev_priv->sb_lock); 5197 5198 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; 5199 5200 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5201 desired_divisor << auxdiv); 5202 } 5203 5204 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, 5205 enum pipe pch_transcoder) 5206 { 5207 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5208 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5209 enum transcoder cpu_transcoder = 
crtc_state->cpu_transcoder; 5210 5211 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 5212 I915_READ(HTOTAL(cpu_transcoder))); 5213 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder), 5214 I915_READ(HBLANK(cpu_transcoder))); 5215 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder), 5216 I915_READ(HSYNC(cpu_transcoder))); 5217 5218 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder), 5219 I915_READ(VTOTAL(cpu_transcoder))); 5220 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder), 5221 I915_READ(VBLANK(cpu_transcoder))); 5222 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder), 5223 I915_READ(VSYNC(cpu_transcoder))); 5224 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder), 5225 I915_READ(VSYNCSHIFT(cpu_transcoder))); 5226 } 5227 5228 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) 5229 { 5230 u32 temp; 5231 5232 temp = I915_READ(SOUTH_CHICKEN1); 5233 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) 5234 return; 5235 5236 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 5237 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 5238 5239 temp &= ~FDI_BC_BIFURCATION_SELECT; 5240 if (enable) 5241 temp |= FDI_BC_BIFURCATION_SELECT; 5242 5243 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis"); 5244 I915_WRITE(SOUTH_CHICKEN1, temp); 5245 POSTING_READ(SOUTH_CHICKEN1); 5246 } 5247 5248 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) 5249 { 5250 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5251 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5252 5253 switch (crtc->pipe) { 5254 case PIPE_A: 5255 break; 5256 case PIPE_B: 5257 if (crtc_state->fdi_lanes > 2) 5258 cpt_set_fdi_bc_bifurcation(dev_priv, false); 5259 else 5260 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5261 5262 break; 5263 case PIPE_C: 5264 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5265 5266 break; 5267 default: 5268 BUG(); 5269 } 5270 } 5271 5272 /* 5273 * Finds the encoder associated with the given CRTC. This can only be 5274 * used when we know that the CRTC isn't feeding multiple encoders! 
5275 */ 5276 static struct intel_encoder * 5277 intel_get_crtc_new_encoder(const struct intel_atomic_state *state, 5278 const struct intel_crtc_state *crtc_state) 5279 { 5280 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5281 const struct drm_connector_state *connector_state; 5282 const struct drm_connector *connector; 5283 struct intel_encoder *encoder = NULL; 5284 int num_encoders = 0; 5285 int i; 5286
5287 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 5288 if (connector_state->crtc != &crtc->base) 5289 continue; 5290
5291 encoder = to_intel_encoder(connector_state->best_encoder); 5292 num_encoders++; 5293 } 5294
5295 WARN(num_encoders != 1, "%d encoders for pipe %c\n", 5296 num_encoders, pipe_name(crtc->pipe)); 5297 5298 return encoder; 5299 } 5300
5301 /* 5302 * Enable PCH resources required for PCH ports: 5303 * - PCH PLLs 5304 * - FDI training & RX/TX 5305 * - update transcoder timings 5306 * - DP transcoding bits 5307 * - transcoder 5308 */ 5309 static void ironlake_pch_enable(const struct intel_atomic_state *state, 5310 const struct intel_crtc_state *crtc_state) 5311 { 5312 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5313 struct drm_device *dev = crtc->base.dev; 5314 struct drm_i915_private *dev_priv = to_i915(dev); 5315 enum pipe pipe = crtc->pipe; 5316 u32 temp; 5317
5318 assert_pch_transcoder_disabled(dev_priv, pipe); 5319
5320 if (IS_IVYBRIDGE(dev_priv)) 5321 ivybridge_update_fdi_bc_bifurcation(crtc_state); 5322
5323 /* Write the TU size bits before fdi link training, so that error 5324 * detection works. */ 5325 I915_WRITE(FDI_RX_TUSIZE1(pipe), 5326 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 5327
5328 /* For PCH output, train the FDI link */ 5329 dev_priv->display.fdi_link_train(crtc, crtc_state); 5330
5331 /* We need to program the right clock selection before writing the pixel 5332 * multiplier into the DPLL. */ 5333 if (HAS_PCH_CPT(dev_priv)) { 5334 u32 sel; 5335
5336 temp = I915_READ(PCH_DPLL_SEL); 5337 temp |= TRANS_DPLL_ENABLE(pipe); 5338 sel = TRANS_DPLLB_SEL(pipe); 5339 if (crtc_state->shared_dpll == 5340 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) 5341 temp |= sel; 5342 else 5343 temp &= ~sel; 5344 I915_WRITE(PCH_DPLL_SEL, temp); 5345 } 5346
5347 /* XXX: PCH PLLs can be enabled any time before we enable the PCH 5348 * transcoder, and we actually should do this to not upset any PCH 5349 * transcoder that already uses the clock when we share it. 5350
5351 * Note that enable_shared_dpll tries to do the right thing, but 5352 * get_shared_dpll unconditionally resets the pll - we need that to have 5353 * the right LVDS enable sequence.
*/ 5354 intel_enable_shared_dpll(crtc_state); 5355 5356 /* set transcoder timing, panel must allow it */ 5357 assert_panel_unlocked(dev_priv, pipe); 5358 ironlake_pch_transcoder_set_timings(crtc_state, pipe); 5359 5360 intel_fdi_normal_train(crtc); 5361 5362 /* For PCH DP, enable TRANS_DP_CTL */ 5363 if (HAS_PCH_CPT(dev_priv) && 5364 intel_crtc_has_dp_encoder(crtc_state)) { 5365 const struct drm_display_mode *adjusted_mode = 5366 &crtc_state->base.adjusted_mode; 5367 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 5368 i915_reg_t reg = TRANS_DP_CTL(pipe); 5369 enum port port; 5370 5371 temp = I915_READ(reg); 5372 temp &= ~(TRANS_DP_PORT_SEL_MASK | 5373 TRANS_DP_SYNC_MASK | 5374 TRANS_DP_BPC_MASK); 5375 temp |= TRANS_DP_OUTPUT_ENABLE; 5376 temp |= bpc << 9; /* same format but at 11:9 */ 5377 5378 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 5379 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 5380 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 5381 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 5382 5383 port = intel_get_crtc_new_encoder(state, crtc_state)->port; 5384 WARN_ON(port < PORT_B || port > PORT_D); 5385 temp |= TRANS_DP_PORT_SEL(port); 5386 5387 I915_WRITE(reg, temp); 5388 } 5389 5390 ironlake_enable_pch_transcoder(crtc_state); 5391 } 5392 5393 static void lpt_pch_enable(const struct intel_atomic_state *state, 5394 const struct intel_crtc_state *crtc_state) 5395 { 5396 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5397 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5398 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 5399 5400 assert_pch_transcoder_disabled(dev_priv, PIPE_A); 5401 5402 lpt_program_iclkip(crtc_state); 5403 5404 /* Set transcoder timing. */ 5405 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A); 5406 5407 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 5408 } 5409 5410 static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe) 5411 { 5412 struct drm_i915_private *dev_priv = to_i915(dev); 5413 i915_reg_t dslreg = PIPEDSL(pipe); 5414 u32 temp; 5415 5416 temp = I915_READ(dslreg); 5417 udelay(500); 5418 if (wait_for(I915_READ(dslreg) != temp, 5)) { 5419 if (wait_for(I915_READ(dslreg) != temp, 5)) 5420 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); 5421 } 5422 } 5423 5424 /* 5425 * The hardware phase 0.0 refers to the center of the pixel. 5426 * We want to start from the top/left edge which is phase 5427 * -0.5. That matches how the hardware calculates the scaling 5428 * factors (from top-left of the first pixel to bottom-right 5429 * of the last pixel, as opposed to the pixel centers). 5430 * 5431 * For 4:2:0 subsampled chroma planes we obviously have to 5432 * adjust that so that the chroma sample position lands in 5433 * the right spot. 5434 * 5435 * Note that for packed YCbCr 4:2:2 formats there is no way to 5436 * control chroma siting. The hardware simply replicates the 5437 * chroma samples for both of the luma samples, and thus we don't 5438 * actually get the expected MPEG2 chroma siting convention :( 5439 * The same behaviour is observed on pre-SKL platforms as well. 
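 *
 * Worked example of skl_scaler_calc_phase() below (editorial addition;
 * the derivation follows): for the luma/RGB case (sub=1) at 1:1 scale
 * (0x10000), phase = -0x8000 + 0x10000/2 = 0, i.e. the -0.5 edge offset
 * and the +0.5 pixel-center offset cancel; for non-cosited 4:2:0 chroma
 * (sub=2) at the same scale, phase = -0x8000 + 0x10000/4 = -0x4000,
 * i.e. -0.25.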
5440 * 5441 * Theory behind the formula (note that we ignore sub-pixel 5442 * source coordinates): 5443 * s = source sample position 5444 * d = destination sample position 5445 *
5446 * Downscaling 4:1: 5447 * -0.5 5448 * | 0.0 5449 * | | 1.5 (initial phase) 5450 * | | | 5451 * v v v 5452 * | s | s | s | s | 5453 * | d | 5454 *
5455 * Upscaling 1:4: 5456 * -0.5 5457 * | -0.375 (initial phase) 5458 * | | 0.0 5459 * | | | 5460 * v v v 5461 * | s | 5462 * | d | d | d | d | 5463 */
5464 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited) 5465 { 5466 int phase = -0x8000; 5467 u16 trip = 0; 5468
5469 if (chroma_cosited) 5470 phase += (sub - 1) * 0x8000 / sub; 5471 5472 phase += scale / (2 * sub); 5473
5474 /* 5475 * Hardware initial phase limited to [-0.5:1.5]. 5476 * Since the max hardware scale factor is 3.0, we 5477 * should never actually exceed 1.0 here. 5478 */ 5479 WARN_ON(phase < -0x8000 || phase > 0x18000); 5480
5481 if (phase < 0) 5482 phase = 0x10000 + phase; 5483 else 5484 trip = PS_PHASE_TRIP; 5485 5486 return ((phase >> 2) & PS_PHASE_MASK) | trip; 5487 } 5488
5489 #define SKL_MIN_SRC_W 8 5490 #define SKL_MAX_SRC_W 4096 5491 #define SKL_MIN_SRC_H 8 5492 #define SKL_MAX_SRC_H 4096 5493 #define SKL_MIN_DST_W 8 5494 #define SKL_MAX_DST_W 4096 5495 #define SKL_MIN_DST_H 8 5496 #define SKL_MAX_DST_H 4096 5497 #define ICL_MAX_SRC_W 5120 5498 #define ICL_MAX_SRC_H 4096 5499 #define ICL_MAX_DST_W 5120 5500 #define ICL_MAX_DST_H 4096 5501 #define SKL_MIN_YUV_420_SRC_W 16 5502 #define SKL_MIN_YUV_420_SRC_H 16 5503
5504 static int 5505 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 5506 unsigned int scaler_user, int *scaler_id, 5507 int src_w, int src_h, int dst_w, int dst_h, 5508 const struct drm_format_info *format, bool need_scaler) 5509 { 5510 struct intel_crtc_scaler_state *scaler_state = 5511 &crtc_state->scaler_state; 5512 struct intel_crtc *intel_crtc = 5513 to_intel_crtc(crtc_state->base.crtc); 5514 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 5515 const struct drm_display_mode *adjusted_mode = 5516 &crtc_state->base.adjusted_mode; 5517
5518 /* 5519 * Src coordinates are already rotated by 270 degrees for 5520 * the 90/270 degree plane rotation cases (to match the 5521 * GTT mapping), hence no need to account for rotation here. 5522 */ 5523 if (src_w != dst_w || src_h != dst_h) 5524 need_scaler = true; 5525
5526 /* 5527 * Scaling/fitting not supported in IF-ID mode in GEN9+ 5528 * TODO: Interlace fetch mode doesn't support YUV420 planar formats. 5529 * Once NV12 is enabled, handle it here while allocating scaler 5530 * for NV12. 5531 */ 5532 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && 5533 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 5534 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n"); 5535 return -EINVAL; 5536 } 5537
5538 /* 5539 * If the plane is being disabled, the scaler is no longer required, or a detach is forced 5540 * - free the scaler bound to this plane/crtc 5541 * - in order to do this, update crtc->scaler_usage 5542 *
5543 * Here the scaler state in crtc_state is set free so that 5544 * the scaler can be assigned to another user. The actual register 5545 * update to free the scaler is done in plane/panel-fit programming. 5546 * For this purpose crtc/plane_state->scaler_id isn't reset here.
5547 */ 5548 if (force_detach || !need_scaler) { 5549 if (*scaler_id >= 0) { 5550 scaler_state->scaler_users &= ~(1 << scaler_user); 5551 scaler_state->scalers[*scaler_id].in_use = 0; 5552 5553 DRM_DEBUG_KMS("scaler_user index %u.%u: " 5554 "Staged freeing scaler id %d scaler_users = 0x%x\n", 5555 intel_crtc->pipe, scaler_user, *scaler_id, 5556 scaler_state->scaler_users); 5557 *scaler_id = -1; 5558 } 5559 return 0; 5560 } 5561 5562 if (format && drm_format_info_is_yuv_semiplanar(format) && 5563 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { 5564 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n"); 5565 return -EINVAL; 5566 } 5567 5568 /* range checks */ 5569 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || 5570 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || 5571 (INTEL_GEN(dev_priv) >= 11 && 5572 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || 5573 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || 5574 (INTEL_GEN(dev_priv) < 11 && 5575 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || 5576 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { 5577 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " 5578 "size is out of scaler range\n", 5579 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h); 5580 return -EINVAL; 5581 } 5582 5583 /* mark this plane as a scaler user in crtc_state */ 5584 scaler_state->scaler_users |= (1 << scaler_user); 5585 DRM_DEBUG_KMS("scaler_user index %u.%u: " 5586 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", 5587 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, 5588 scaler_state->scaler_users); 5589 5590 return 0; 5591 } 5592 5593 /** 5594 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc. 5595 * 5596 * @state: crtc's scaler state 5597 * 5598 * Return 5599 * 0 - scaler_usage updated successfully 5600 * error - requested scaling cannot be supported or other error condition 5601 */ 5602 int skl_update_scaler_crtc(struct intel_crtc_state *state) 5603 { 5604 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 5605 bool need_scaler = false; 5606 5607 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 5608 need_scaler = true; 5609 5610 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 5611 &state->scaler_state.scaler_id, 5612 state->pipe_src_w, state->pipe_src_h, 5613 adjusted_mode->crtc_hdisplay, 5614 adjusted_mode->crtc_vdisplay, NULL, need_scaler); 5615 } 5616 5617 /** 5618 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 5619 * @crtc_state: crtc's scaler state 5620 * @plane_state: atomic plane state to update 5621 * 5622 * Return 5623 * 0 - scaler_usage updated successfully 5624 * error - requested scaling cannot be supported or other error condition 5625 */ 5626 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 5627 struct intel_plane_state *plane_state) 5628 { 5629 struct intel_plane *intel_plane = 5630 to_intel_plane(plane_state->base.plane); 5631 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 5632 struct drm_framebuffer *fb = plane_state->base.fb; 5633 int ret; 5634 bool force_detach = !fb || !plane_state->base.visible; 5635 bool need_scaler = false; 5636 5637 /* Pre-gen11 and SDR planes always need a scaler for planar formats. 
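 * (Editorial note: a pass through the scaler is what performs the
 * chroma up-sampling for these 4:2:0 semi-planar formats; gen11+ HDR
 * planes have their own chroma upsampler, hence the icl_is_hdr_plane()
 * check below.)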
*/ 5638 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && 5639 fb && drm_format_info_is_yuv_semiplanar(fb->format)) 5640 need_scaler = true; 5641 5642 ret = skl_update_scaler(crtc_state, force_detach, 5643 drm_plane_index(&intel_plane->base), 5644 &plane_state->scaler_id, 5645 drm_rect_width(&plane_state->base.src) >> 16, 5646 drm_rect_height(&plane_state->base.src) >> 16, 5647 drm_rect_width(&plane_state->base.dst), 5648 drm_rect_height(&plane_state->base.dst), 5649 fb ? fb->format : NULL, need_scaler); 5650 5651 if (ret || plane_state->scaler_id < 0) 5652 return ret; 5653 5654 /* check colorkey */ 5655 if (plane_state->ckey.flags) { 5656 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed", 5657 intel_plane->base.base.id, 5658 intel_plane->base.name); 5659 return -EINVAL; 5660 } 5661 5662 /* Check src format */ 5663 switch (fb->format->format) { 5664 case DRM_FORMAT_RGB565: 5665 case DRM_FORMAT_XBGR8888: 5666 case DRM_FORMAT_XRGB8888: 5667 case DRM_FORMAT_ABGR8888: 5668 case DRM_FORMAT_ARGB8888: 5669 case DRM_FORMAT_XRGB2101010: 5670 case DRM_FORMAT_XBGR2101010: 5671 case DRM_FORMAT_YUYV: 5672 case DRM_FORMAT_YVYU: 5673 case DRM_FORMAT_UYVY: 5674 case DRM_FORMAT_VYUY: 5675 case DRM_FORMAT_NV12: 5676 case DRM_FORMAT_P010: 5677 case DRM_FORMAT_P012: 5678 case DRM_FORMAT_P016: 5679 case DRM_FORMAT_Y210: 5680 case DRM_FORMAT_Y212: 5681 case DRM_FORMAT_Y216: 5682 case DRM_FORMAT_XVYU2101010: 5683 case DRM_FORMAT_XVYU12_16161616: 5684 case DRM_FORMAT_XVYU16161616: 5685 break; 5686 case DRM_FORMAT_XBGR16161616F: 5687 case DRM_FORMAT_ABGR16161616F: 5688 case DRM_FORMAT_XRGB16161616F: 5689 case DRM_FORMAT_ARGB16161616F: 5690 if (INTEL_GEN(dev_priv) >= 11) 5691 break; 5692 /* fall through */ 5693 default: 5694 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", 5695 intel_plane->base.base.id, intel_plane->base.name, 5696 fb->base.id, fb->format->format); 5697 return -EINVAL; 5698 } 5699 5700 return 0; 5701 } 5702 5703 static void skylake_scaler_disable(struct intel_crtc *crtc) 5704 { 5705 int i; 5706 5707 for (i = 0; i < crtc->num_scalers; i++) 5708 skl_detach_scaler(crtc, i); 5709 } 5710 5711 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state) 5712 { 5713 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5714 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5715 enum pipe pipe = crtc->pipe; 5716 const struct intel_crtc_scaler_state *scaler_state = 5717 &crtc_state->scaler_state; 5718 5719 if (crtc_state->pch_pfit.enabled) { 5720 u16 uv_rgb_hphase, uv_rgb_vphase; 5721 int pfit_w, pfit_h, hscale, vscale; 5722 int id; 5723 5724 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0)) 5725 return; 5726 5727 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF; 5728 pfit_h = crtc_state->pch_pfit.size & 0xFFFF; 5729 5730 hscale = (crtc_state->pipe_src_w << 16) / pfit_w; 5731 vscale = (crtc_state->pipe_src_h << 16) / pfit_h; 5732 5733 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); 5734 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); 5735 5736 id = scaler_state->scaler_id; 5737 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 5738 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 5739 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id), 5740 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); 5741 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id), 5742 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); 5743 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos); 5744 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size); 
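/* Note: pch_pfit.pos/size pack x/y and width/height as (x << 16) | y,
 * matching the pfit_w/pfit_h decode above; e.g. with hypothetical values,
 * a 1920x1080 window at 0,0 would be written as pos=0x00000000,
 * size=0x07800438. */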
5745 } 5746 } 5747 5748 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state) 5749 { 5750 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5751 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5752 enum pipe pipe = crtc->pipe; 5753 5754 if (crtc_state->pch_pfit.enabled) { 5755 /* Force use of hard-coded filter coefficients 5756 * as some pre-programmed values are broken, 5757 * e.g. x201. 5758 */ 5759 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 5760 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | 5761 PF_PIPE_SEL_IVB(pipe)); 5762 else 5763 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 5764 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos); 5765 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size); 5766 } 5767 } 5768 5769 void hsw_enable_ips(const struct intel_crtc_state *crtc_state) 5770 { 5771 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5772 struct drm_device *dev = crtc->base.dev; 5773 struct drm_i915_private *dev_priv = to_i915(dev); 5774 5775 if (!crtc_state->ips_enabled) 5776 return; 5777 5778 /* 5779 * We can only enable IPS after we enable a plane and wait for a vblank. 5780 * This function is called from post_plane_update, which is run after 5781 * a vblank wait. 5782 */ 5783 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); 5784 5785 if (IS_BROADWELL(dev_priv)) { 5786 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 5787 IPS_ENABLE | IPS_PCODE_CONTROL)); 5788 /* Quoting Art Runyan: "it's not safe to expect any particular 5789 * value in IPS_CTL bit 31 after enabling IPS through the 5790 * mailbox." Moreover, the mailbox may return a bogus state, 5791 * so we need to just enable it and continue on. 5792 */ 5793 } else { 5794 I915_WRITE(IPS_CTL, IPS_ENABLE); 5795 /* The bit only becomes 1 in the next vblank, so this wait here 5796 * is essentially intel_wait_for_vblank. If we don't have this 5797 * and don't wait for vblanks until the end of crtc_enable, then 5798 * the HW state readout code will complain that the expected 5799 * IPS_CTL value is not the one we read. */ 5800 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50)) 5801 DRM_ERROR("Timed out waiting for IPS enable\n"); 5802 } 5803 } 5804 5805 void hsw_disable_ips(const struct intel_crtc_state *crtc_state) 5806 { 5807 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5808 struct drm_device *dev = crtc->base.dev; 5809 struct drm_i915_private *dev_priv = to_i915(dev); 5810 5811 if (!crtc_state->ips_enabled) 5812 return; 5813 5814 if (IS_BROADWELL(dev_priv)) { 5815 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 5816 /* 5817 * Wait for PCODE to finish disabling IPS. The BSpec specified 5818 * 42ms timeout value leads to occasional timeouts so use 100ms 5819 * instead. 5820 */ 5821 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100)) 5822 DRM_ERROR("Timed out waiting for IPS disable\n"); 5823 } else { 5824 I915_WRITE(IPS_CTL, 0); 5825 POSTING_READ(IPS_CTL); 5826 } 5827 5828 /* We need to wait for a vblank before we can disable the plane. */ 5829 intel_wait_for_vblank(dev_priv, crtc->pipe); 5830 } 5831 5832 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) 5833 { 5834 if (intel_crtc->overlay) 5835 (void) intel_overlay_switch_off(intel_crtc->overlay); 5836 5837 /* Let userspace switch the overlay on again. In most cases userspace 5838 * has to recompute where to put it anyway.
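* (intel_overlay_switch_off() can fail, but there is nothing useful to do
 * about that on this path, hence the (void) cast above.)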
5839 */ 5840 } 5841 5842 /** 5843 * intel_post_enable_primary - Perform operations after enabling primary plane 5844 * @crtc: the CRTC whose primary plane was just enabled 5845 * @new_crtc_state: the enabling state 5846 * 5847 * Performs potentially sleeping operations that must be done after the primary 5848 * plane is enabled, such as updating FBC and IPS. Note that this may be 5849 * called due to an explicit primary plane update, or due to an implicit 5850 * re-enable that is caused when a sprite plane is updated to no longer 5851 * completely hide the primary plane. 5852 */ 5853 static void 5854 intel_post_enable_primary(struct drm_crtc *crtc, 5855 const struct intel_crtc_state *new_crtc_state) 5856 { 5857 struct drm_device *dev = crtc->dev; 5858 struct drm_i915_private *dev_priv = to_i915(dev); 5859 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5860 enum pipe pipe = intel_crtc->pipe; 5861 5862 /* 5863 * Gen2 reports pipe underruns whenever all planes are disabled. 5864 * So don't enable underrun reporting before at least some planes 5865 * are enabled. 5866 * FIXME: Need to fix the logic to work when we turn off all planes 5867 * but leave the pipe running. 5868 */ 5869 if (IS_GEN(dev_priv, 2)) 5870 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5871 5872 /* Underruns don't always raise interrupts, so check manually. */ 5873 intel_check_cpu_fifo_underruns(dev_priv); 5874 intel_check_pch_fifo_underruns(dev_priv); 5875 } 5876 5877 /* FIXME get rid of this and use pre_plane_update */ 5878 static void 5879 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) 5880 { 5881 struct drm_device *dev = crtc->dev; 5882 struct drm_i915_private *dev_priv = to_i915(dev); 5883 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5884 enum pipe pipe = intel_crtc->pipe; 5885 5886 /* 5887 * Gen2 reports pipe underruns whenever all planes are disabled. 5888 * So disable underrun reporting before all the planes get disabled. 5889 */ 5890 if (IS_GEN(dev_priv, 2)) 5891 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5892 5893 hsw_disable_ips(to_intel_crtc_state(crtc->state)); 5894 5895 /* 5896 * Vblank time updates from the shadow to live plane control register 5897 * are blocked if the memory self-refresh mode is active at that 5898 * moment. So to make sure the plane gets truly disabled, disable 5899 * first the self-refresh mode. The self-refresh enable bit in turn 5900 * will be checked/applied by the HW only at the next frame start 5901 * event which is after the vblank start event, so we need to have a 5902 * wait-for-vblank between disabling the plane and the pipe. 5903 */ 5904 if (HAS_GMCH(dev_priv) && 5905 intel_set_memory_cxsr(dev_priv, false)) 5906 intel_wait_for_vblank(dev_priv, pipe); 5907 } 5908 5909 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state, 5910 const struct intel_crtc_state *new_crtc_state) 5911 { 5912 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 5913 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5914 5915 if (!old_crtc_state->ips_enabled) 5916 return false; 5917 5918 if (needs_modeset(new_crtc_state)) 5919 return true; 5920 5921 /* 5922 * Workaround : Do not read or write the pipe palette/gamma data while 5923 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 5924 * 5925 * Disable IPS before we program the LUT. 
5926 */ 5927 if (IS_HASWELL(dev_priv) && 5928 (new_crtc_state->base.color_mgmt_changed || 5929 new_crtc_state->update_pipe) && 5930 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 5931 return true; 5932 5933 return !new_crtc_state->ips_enabled; 5934 } 5935 5936 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state, 5937 const struct intel_crtc_state *new_crtc_state) 5938 { 5939 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 5940 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5941 5942 if (!new_crtc_state->ips_enabled) 5943 return false; 5944 5945 if (needs_modeset(new_crtc_state)) 5946 return true; 5947 5948 /* 5949 * Workaround: Do not read or write the pipe palette/gamma data while 5950 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 5951 * 5952 * Re-enable IPS after the LUT has been programmed. 5953 */ 5954 if (IS_HASWELL(dev_priv) && 5955 (new_crtc_state->base.color_mgmt_changed || 5956 new_crtc_state->update_pipe) && 5957 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 5958 return true; 5959 5960 /* 5961 * We can't read out IPS on broadwell, so assume the worst and 5962 * forcibly enable IPS on the first fastset. 5963 */ 5964 if (new_crtc_state->update_pipe && 5965 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED) 5966 return true; 5967 5968 return !old_crtc_state->ips_enabled; 5969 } 5970 5971 static bool needs_nv12_wa(struct drm_i915_private *dev_priv, 5972 const struct intel_crtc_state *crtc_state) 5973 { 5974 if (!crtc_state->nv12_planes) 5975 return false; 5976 5977 /* WA Display #0827: Gen9:all */ 5978 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) 5979 return true; 5980 5981 return false; 5982 } 5983 5984 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv, 5985 const struct intel_crtc_state *crtc_state) 5986 { 5987 /* Wa_2006604312:icl */ 5988 if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv)) 5989 return true; 5990 5991 return false; 5992 } 5993 5994 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) 5995 { 5996 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 5997 struct drm_device *dev = crtc->base.dev; 5998 struct drm_i915_private *dev_priv = to_i915(dev); 5999 struct drm_atomic_state *state = old_crtc_state->base.state; 6000 struct intel_crtc_state *pipe_config = 6001 intel_atomic_get_new_crtc_state(to_intel_atomic_state(state), 6002 crtc); 6003 struct drm_plane *primary = crtc->base.primary; 6004 struct drm_plane_state *old_primary_state = 6005 drm_atomic_get_old_plane_state(state, primary); 6006 6007 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); 6008 6009 if (pipe_config->update_wm_post && pipe_config->base.active) 6010 intel_update_watermarks(crtc); 6011 6012 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config)) 6013 hsw_enable_ips(pipe_config); 6014 6015 if (old_primary_state) { 6016 struct drm_plane_state *new_primary_state = 6017 drm_atomic_get_new_plane_state(state, primary); 6018 6019 intel_fbc_post_update(crtc); 6020 6021 if (new_primary_state->visible && 6022 (needs_modeset(pipe_config) || 6023 !old_primary_state->visible)) 6024 intel_post_enable_primary(&crtc->base, pipe_config); 6025 } 6026 6027 if (needs_nv12_wa(dev_priv, old_crtc_state) && 6028 !needs_nv12_wa(dev_priv, pipe_config)) 6029 skl_wa_827(dev_priv, crtc->pipe, false); 6030 6031 if (needs_scalerclk_wa(dev_priv, old_crtc_state) && 6032
!needs_scalerclk_wa(dev_priv, pipe_config)) 6033 icl_wa_scalerclkgating(dev_priv, crtc->pipe, false); 6034 } 6035 6036 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, 6037 struct intel_crtc_state *pipe_config) 6038 { 6039 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 6040 struct drm_device *dev = crtc->base.dev; 6041 struct drm_i915_private *dev_priv = to_i915(dev); 6042 struct drm_atomic_state *state = old_crtc_state->base.state; 6043 struct drm_plane *primary = crtc->base.primary; 6044 struct drm_plane_state *old_primary_state = 6045 drm_atomic_get_old_plane_state(state, primary); 6046 bool modeset = needs_modeset(pipe_config); 6047 struct intel_atomic_state *intel_state = 6048 to_intel_atomic_state(state); 6049 6050 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config)) 6051 hsw_disable_ips(old_crtc_state); 6052 6053 if (old_primary_state) { 6054 struct intel_plane_state *new_primary_state = 6055 intel_atomic_get_new_plane_state(intel_state, 6056 to_intel_plane(primary)); 6057 6058 intel_fbc_pre_update(crtc, pipe_config, new_primary_state); 6059 /* 6060 * Gen2 reports pipe underruns whenever all planes are disabled. 6061 * So disable underrun reporting before all the planes get disabled. 6062 */ 6063 if (IS_GEN(dev_priv, 2) && old_primary_state->visible && 6064 (modeset || !new_primary_state->base.visible)) 6065 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 6066 } 6067 6068 /* Display WA 827 */ 6069 if (!needs_nv12_wa(dev_priv, old_crtc_state) && 6070 needs_nv12_wa(dev_priv, pipe_config)) 6071 skl_wa_827(dev_priv, crtc->pipe, true); 6072 6073 /* Wa_2006604312:icl */ 6074 if (!needs_scalerclk_wa(dev_priv, old_crtc_state) && 6075 needs_scalerclk_wa(dev_priv, pipe_config)) 6076 icl_wa_scalerclkgating(dev_priv, crtc->pipe, true); 6077 6078 /* 6079 * Vblank time updates from the shadow to live plane control register 6080 * are blocked if the memory self-refresh mode is active at that 6081 * moment. So to make sure the plane gets truly disabled, disable 6082 * first the self-refresh mode. The self-refresh enable bit in turn 6083 * will be checked/applied by the HW only at the next frame start 6084 * event which is after the vblank start event, so we need to have a 6085 * wait-for-vblank between disabling the plane and the pipe. 6086 */ 6087 if (HAS_GMCH(dev_priv) && old_crtc_state->base.active && 6088 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) 6089 intel_wait_for_vblank(dev_priv, crtc->pipe); 6090 6091 /* 6092 * IVB workaround: must disable low power watermarks for at least 6093 * one frame before enabling scaling. LP watermarks can be re-enabled 6094 * when scaling is disabled. 6095 * 6096 * WaCxSRDisabledForSpriteScaling:ivb 6097 */ 6098 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) && 6099 old_crtc_state->base.active) 6100 intel_wait_for_vblank(dev_priv, crtc->pipe); 6101 6102 /* 6103 * If we're doing a modeset, we're done. No need to do any pre-vblank 6104 * watermark programming here. 6105 */ 6106 if (needs_modeset(pipe_config)) 6107 return; 6108 6109 /* 6110 * For platforms that support atomic watermarks, program the 6111 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these 6112 * will be the intermediate values that are safe for both pre- and 6113 * post- vblank; when vblank happens, the 'active' values will be set 6114 * to the final 'target' values and we'll do this again to get the 6115 * optimal watermarks. 
For gen9+ platforms, the values we program here 6116 * will be the final target values which will get automatically latched 6117 * at vblank time; no further programming will be necessary. 6118 * 6119 * If a platform hasn't been transitioned to atomic watermarks yet, 6120 * we'll continue to update watermarks the old way, if flags tell 6121 * us to. 6122 */ 6123 if (dev_priv->display.initial_watermarks != NULL) 6124 dev_priv->display.initial_watermarks(intel_state, 6125 pipe_config); 6126 else if (pipe_config->update_wm_pre) 6127 intel_update_watermarks(crtc); 6128 } 6129 6130 static void intel_crtc_disable_planes(struct intel_atomic_state *state, 6131 struct intel_crtc *crtc) 6132 { 6133 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6134 const struct intel_crtc_state *new_crtc_state = 6135 intel_atomic_get_new_crtc_state(state, crtc); 6136 unsigned int update_mask = new_crtc_state->update_planes; 6137 const struct intel_plane_state *old_plane_state; 6138 struct intel_plane *plane; 6139 unsigned fb_bits = 0; 6140 int i; 6141 6142 intel_crtc_dpms_overlay_disable(crtc); 6143 6144 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 6145 if (crtc->pipe != plane->pipe || 6146 !(update_mask & BIT(plane->id))) 6147 continue; 6148 6149 intel_disable_plane(plane, new_crtc_state); 6150 6151 if (old_plane_state->base.visible) 6152 fb_bits |= plane->frontbuffer_bit; 6153 } 6154 6155 intel_frontbuffer_flip(dev_priv, fb_bits); 6156 } 6157 6158 /* 6159 * intel_connector_primary_encoder - get the primary encoder for a connector 6160 * @connector: connector for which to return the encoder 6161 * 6162 * Returns the primary encoder for a connector. There is a 1:1 mapping from 6163 * all connectors to their encoder, except for DP-MST connectors which have 6164 * both a virtual and a primary encoder. These DP-MST primary encoders can be 6165 * pointed to by as many DP-MST connectors as there are pipes. 6166 */ 6167 static struct intel_encoder * 6168 intel_connector_primary_encoder(struct intel_connector *connector) 6169 { 6170 struct intel_encoder *encoder; 6171 6172 if (connector->mst_port) 6173 return &dp_to_dig_port(connector->mst_port)->base; 6174 6175 encoder = intel_attached_encoder(&connector->base); 6176 WARN_ON(!encoder); 6177 6178 return encoder; 6179 } 6180 6181 static bool 6182 intel_connector_needs_modeset(struct intel_atomic_state *state, 6183 const struct drm_connector_state *old_conn_state, 6184 const struct drm_connector_state *new_conn_state) 6185 { 6186 struct intel_crtc *old_crtc = old_conn_state->crtc ? 6187 to_intel_crtc(old_conn_state->crtc) : NULL; 6188 struct intel_crtc *new_crtc = new_conn_state->crtc ? 
6189 to_intel_crtc(new_conn_state->crtc) : NULL; 6190 6191 return new_crtc != old_crtc || 6192 (new_crtc && 6193 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc))); 6194 } 6195 6196 static void intel_encoders_update_prepare(struct intel_atomic_state *state) 6197 { 6198 struct drm_connector_state *old_conn_state; 6199 struct drm_connector_state *new_conn_state; 6200 struct drm_connector *conn; 6201 int i; 6202 6203 for_each_oldnew_connector_in_state(&state->base, conn, 6204 old_conn_state, new_conn_state, i) { 6205 struct intel_encoder *encoder; 6206 struct intel_crtc *crtc; 6207 6208 if (!intel_connector_needs_modeset(state, 6209 old_conn_state, 6210 new_conn_state)) 6211 continue; 6212 6213 encoder = intel_connector_primary_encoder(to_intel_connector(conn)); 6214 if (!encoder->update_prepare) 6215 continue; 6216 6217 crtc = new_conn_state->crtc ? 6218 to_intel_crtc(new_conn_state->crtc) : NULL; 6219 encoder->update_prepare(state, encoder, crtc); 6220 } 6221 } 6222 6223 static void intel_encoders_update_complete(struct intel_atomic_state *state) 6224 { 6225 struct drm_connector_state *old_conn_state; 6226 struct drm_connector_state *new_conn_state; 6227 struct drm_connector *conn; 6228 int i; 6229 6230 for_each_oldnew_connector_in_state(&state->base, conn, 6231 old_conn_state, new_conn_state, i) { 6232 struct intel_encoder *encoder; 6233 struct intel_crtc *crtc; 6234 6235 if (!intel_connector_needs_modeset(state, 6236 old_conn_state, 6237 new_conn_state)) 6238 continue; 6239 6240 encoder = intel_connector_primary_encoder(to_intel_connector(conn)); 6241 if (!encoder->update_complete) 6242 continue; 6243 6244 crtc = new_conn_state->crtc ? 6245 to_intel_crtc(new_conn_state->crtc) : NULL; 6246 encoder->update_complete(state, encoder, crtc); 6247 } 6248 } 6249 6250 static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc, 6251 struct intel_crtc_state *crtc_state, 6252 struct intel_atomic_state *state) 6253 { 6254 struct drm_connector_state *conn_state; 6255 struct drm_connector *conn; 6256 int i; 6257 6258 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6259 struct intel_encoder *encoder = 6260 to_intel_encoder(conn_state->best_encoder); 6261 6262 if (conn_state->crtc != &crtc->base) 6263 continue; 6264 6265 if (encoder->pre_pll_enable) 6266 encoder->pre_pll_enable(encoder, crtc_state, conn_state); 6267 } 6268 } 6269 6270 static void intel_encoders_pre_enable(struct intel_crtc *crtc, 6271 struct intel_crtc_state *crtc_state, 6272 struct intel_atomic_state *state) 6273 { 6274 struct drm_connector_state *conn_state; 6275 struct drm_connector *conn; 6276 int i; 6277 6278 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6279 struct intel_encoder *encoder = 6280 to_intel_encoder(conn_state->best_encoder); 6281 6282 if (conn_state->crtc != &crtc->base) 6283 continue; 6284 6285 if (encoder->pre_enable) 6286 encoder->pre_enable(encoder, crtc_state, conn_state); 6287 } 6288 } 6289 6290 static void intel_encoders_enable(struct intel_crtc *crtc, 6291 struct intel_crtc_state *crtc_state, 6292 struct intel_atomic_state *state) 6293 { 6294 struct drm_connector_state *conn_state; 6295 struct drm_connector *conn; 6296 int i; 6297 6298 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6299 struct intel_encoder *encoder = 6300 to_intel_encoder(conn_state->best_encoder); 6301 6302 if (conn_state->crtc != &crtc->base) 6303 continue; 6304 6305 if (encoder->enable) 6306 encoder->enable(encoder, crtc_state, conn_state); 6307 
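/* Notify the ACPI OpRegion that the encoder is active only once
 * ->enable() has completed. */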
intel_opregion_notify_encoder(encoder, true); 6308 } 6309 } 6310 6311 static void intel_encoders_disable(struct intel_crtc *crtc, 6312 struct intel_crtc_state *old_crtc_state, 6313 struct intel_atomic_state *state) 6314 { 6315 struct drm_connector_state *old_conn_state; 6316 struct drm_connector *conn; 6317 int i; 6318 6319 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6320 struct intel_encoder *encoder = 6321 to_intel_encoder(old_conn_state->best_encoder); 6322 6323 if (old_conn_state->crtc != &crtc->base) 6324 continue; 6325 6326 intel_opregion_notify_encoder(encoder, false); 6327 if (encoder->disable) 6328 encoder->disable(encoder, old_crtc_state, old_conn_state); 6329 } 6330 } 6331 6332 static void intel_encoders_post_disable(struct intel_crtc *crtc, 6333 struct intel_crtc_state *old_crtc_state, 6334 struct intel_atomic_state *state) 6335 { 6336 struct drm_connector_state *old_conn_state; 6337 struct drm_connector *conn; 6338 int i; 6339 6340 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6341 struct intel_encoder *encoder = 6342 to_intel_encoder(old_conn_state->best_encoder); 6343 6344 if (old_conn_state->crtc != &crtc->base) 6345 continue; 6346 6347 if (encoder->post_disable) 6348 encoder->post_disable(encoder, old_crtc_state, old_conn_state); 6349 } 6350 } 6351 6352 static void intel_encoders_post_pll_disable(struct intel_crtc *crtc, 6353 struct intel_crtc_state *old_crtc_state, 6354 struct intel_atomic_state *state) 6355 { 6356 struct drm_connector_state *old_conn_state; 6357 struct drm_connector *conn; 6358 int i; 6359 6360 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6361 struct intel_encoder *encoder = 6362 to_intel_encoder(old_conn_state->best_encoder); 6363 6364 if (old_conn_state->crtc != &crtc->base) 6365 continue; 6366 6367 if (encoder->post_pll_disable) 6368 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state); 6369 } 6370 } 6371 6372 static void intel_encoders_update_pipe(struct intel_crtc *crtc, 6373 struct intel_crtc_state *crtc_state, 6374 struct intel_atomic_state *state) 6375 { 6376 struct drm_connector_state *conn_state; 6377 struct drm_connector *conn; 6378 int i; 6379 6380 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6381 struct intel_encoder *encoder = 6382 to_intel_encoder(conn_state->best_encoder); 6383 6384 if (conn_state->crtc != &crtc->base) 6385 continue; 6386 6387 if (encoder->update_pipe) 6388 encoder->update_pipe(encoder, crtc_state, conn_state); 6389 } 6390 } 6391 6392 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) 6393 { 6394 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6395 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 6396 6397 plane->disable_plane(plane, crtc_state); 6398 } 6399 6400 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, 6401 struct intel_atomic_state *state) 6402 { 6403 struct drm_crtc *crtc = pipe_config->base.crtc; 6404 struct drm_device *dev = crtc->dev; 6405 struct drm_i915_private *dev_priv = to_i915(dev); 6406 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6407 enum pipe pipe = intel_crtc->pipe; 6408 6409 if (WARN_ON(intel_crtc->active)) 6410 return; 6411 6412 /* 6413 * Sometimes spurious CPU pipe underruns happen during FDI 6414 * training, at least with VGA+HDMI cloning. Suppress them. 6415 * 6416 * On ILK we get occasional spurious CPU pipe underruns 6417 * between eDP port A enable and vdd enable.
Also PCH port 6418 * enable seems to result in the occasional CPU pipe underrun. 6419 * 6420 * Spurious PCH underruns also occur during PCH enabling. 6421 */ 6422 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6423 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 6424 6425 if (pipe_config->has_pch_encoder) 6426 intel_prepare_shared_dpll(pipe_config); 6427 6428 if (intel_crtc_has_dp_encoder(pipe_config)) 6429 intel_dp_set_m_n(pipe_config, M1_N1); 6430 6431 intel_set_pipe_timings(pipe_config); 6432 intel_set_pipe_src_size(pipe_config); 6433 6434 if (pipe_config->has_pch_encoder) { 6435 intel_cpu_transcoder_set_m_n(pipe_config, 6436 &pipe_config->fdi_m_n, NULL); 6437 } 6438 6439 ironlake_set_pipeconf(pipe_config); 6440 6441 intel_crtc->active = true; 6442 6443 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6444 6445 if (pipe_config->has_pch_encoder) { 6446 /* Note: FDI PLL enabling _must_ be done before we enable the 6447 * cpu pipes, hence this is separate from all the other fdi/pch 6448 * enabling. */ 6449 ironlake_fdi_pll_enable(pipe_config); 6450 } else { 6451 assert_fdi_tx_disabled(dev_priv, pipe); 6452 assert_fdi_rx_disabled(dev_priv, pipe); 6453 } 6454 6455 ironlake_pfit_enable(pipe_config); 6456 6457 /* 6458 * On ILK+ LUT must be loaded before the pipe is running but with 6459 * clocks enabled 6460 */ 6461 intel_color_load_luts(pipe_config); 6462 intel_color_commit(pipe_config); 6463 /* update DSPCNTR to configure gamma for pipe bottom color */ 6464 intel_disable_primary_plane(pipe_config); 6465 6466 if (dev_priv->display.initial_watermarks != NULL) 6467 dev_priv->display.initial_watermarks(state, pipe_config); 6468 intel_enable_pipe(pipe_config); 6469 6470 if (pipe_config->has_pch_encoder) 6471 ironlake_pch_enable(state, pipe_config); 6472 6473 assert_vblank_disabled(crtc); 6474 intel_crtc_vblank_on(pipe_config); 6475 6476 intel_encoders_enable(intel_crtc, pipe_config, state); 6477 6478 if (HAS_PCH_CPT(dev_priv)) 6479 cpt_verify_modeset(dev, intel_crtc->pipe); 6480 6481 /* 6482 * Must wait for vblank to avoid spurious PCH FIFO underruns. 6483 * And a second vblank wait is needed at least on ILK with 6484 * some interlaced HDMI modes. Let's do the double wait always 6485 * in case there are more corner cases we don't know about. 6486 */ 6487 if (pipe_config->has_pch_encoder) { 6488 intel_wait_for_vblank(dev_priv, pipe); 6489 intel_wait_for_vblank(dev_priv, pipe); 6490 } 6491 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6492 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 6493 } 6494 6495 /* IPS only exists on ULT machines and is tied to pipe A. 
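* (HAS_IPS() is expected to evaluate true only for HSW ULT and BDW; the
 * pipe A restriction is what this helper adds on top.)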
*/ 6496 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 6497 { 6498 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; 6499 } 6500 6501 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, 6502 enum pipe pipe, bool apply) 6503 { 6504 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe)); 6505 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; 6506 6507 if (apply) 6508 val |= mask; 6509 else 6510 val &= ~mask; 6511 6512 I915_WRITE(CLKGATE_DIS_PSL(pipe), val); 6513 } 6514 6515 static void icl_pipe_mbus_enable(struct intel_crtc *crtc) 6516 { 6517 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6518 enum pipe pipe = crtc->pipe; 6519 u32 val; 6520 6521 val = MBUS_DBOX_A_CREDIT(2); 6522 6523 if (INTEL_GEN(dev_priv) >= 12) { 6524 val |= MBUS_DBOX_BW_CREDIT(2); 6525 val |= MBUS_DBOX_B_CREDIT(12); 6526 } else { 6527 val |= MBUS_DBOX_BW_CREDIT(1); 6528 val |= MBUS_DBOX_B_CREDIT(8); 6529 } 6530 6531 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); 6532 } 6533 6534 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, 6535 struct intel_atomic_state *state) 6536 { 6537 struct drm_crtc *crtc = pipe_config->base.crtc; 6538 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 6539 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6540 enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe; 6541 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 6542 bool psl_clkgate_wa; 6543 6544 if (WARN_ON(intel_crtc->active)) 6545 return; 6546 6547 intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); 6548 6549 if (pipe_config->shared_dpll) 6550 intel_enable_shared_dpll(pipe_config); 6551 6552 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6553 6554 if (intel_crtc_has_dp_encoder(pipe_config)) 6555 intel_dp_set_m_n(pipe_config, M1_N1); 6556 6557 if (!transcoder_is_dsi(cpu_transcoder)) 6558 intel_set_pipe_timings(pipe_config); 6559 6560 if (INTEL_GEN(dev_priv) >= 11) 6561 icl_enable_trans_port_sync(pipe_config); 6562 6563 intel_set_pipe_src_size(pipe_config); 6564 6565 if (cpu_transcoder != TRANSCODER_EDP && 6566 !transcoder_is_dsi(cpu_transcoder)) { 6567 I915_WRITE(PIPE_MULT(cpu_transcoder), 6568 pipe_config->pixel_multiplier - 1); 6569 } 6570 6571 if (pipe_config->has_pch_encoder) { 6572 intel_cpu_transcoder_set_m_n(pipe_config, 6573 &pipe_config->fdi_m_n, NULL); 6574 } 6575 6576 if (!transcoder_is_dsi(cpu_transcoder)) 6577 haswell_set_pipeconf(pipe_config); 6578 6579 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 6580 bdw_set_pipemisc(pipe_config); 6581 6582 intel_crtc->active = true; 6583 6584 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ 6585 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && 6586 pipe_config->pch_pfit.enabled; 6587 if (psl_clkgate_wa) 6588 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); 6589 6590 if (INTEL_GEN(dev_priv) >= 9) 6591 skylake_pfit_enable(pipe_config); 6592 else 6593 ironlake_pfit_enable(pipe_config); 6594 6595 /* 6596 * On ILK+ LUT must be loaded before the pipe is running but with 6597 * clocks enabled 6598 */ 6599 intel_color_load_luts(pipe_config); 6600 intel_color_commit(pipe_config); 6601 /* update DSPCNTR to configure gamma/csc for pipe bottom color */ 6602 if (INTEL_GEN(dev_priv) < 9) 6603 intel_disable_primary_plane(pipe_config); 6604 6605 if (INTEL_GEN(dev_priv) >= 11) 6606 icl_set_pipe_chicken(intel_crtc); 6607 6608 if (!transcoder_is_dsi(cpu_transcoder)) 6609 intel_ddi_enable_transcoder_func(pipe_config); 
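/* Initial watermarks have to be valid before the pipe, enabled below,
 * starts fetching from memory. */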
6610 6611 if (dev_priv->display.initial_watermarks != NULL) 6612 dev_priv->display.initial_watermarks(state, pipe_config); 6613 6614 if (INTEL_GEN(dev_priv) >= 11) 6615 icl_pipe_mbus_enable(intel_crtc); 6616 6617 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 6618 if (!transcoder_is_dsi(cpu_transcoder)) 6619 intel_enable_pipe(pipe_config); 6620 6621 if (pipe_config->has_pch_encoder) 6622 lpt_pch_enable(state, pipe_config); 6623 6624 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) 6625 intel_ddi_set_vc_payload_alloc(pipe_config, true); 6626 6627 assert_vblank_disabled(crtc); 6628 intel_crtc_vblank_on(pipe_config); 6629 6630 intel_encoders_enable(intel_crtc, pipe_config, state); 6631 6632 if (psl_clkgate_wa) { 6633 intel_wait_for_vblank(dev_priv, pipe); 6634 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); 6635 } 6636 6637 /* If we change the relative order between pipe/planes enabling, we need 6638 * to change the workaround. */ 6639 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; 6640 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 6641 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 6642 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 6643 } 6644 } 6645 6646 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) 6647 { 6648 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 6649 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6650 enum pipe pipe = crtc->pipe; 6651 6652 /* To avoid upsetting the power well on haswell only disable the pfit if 6653 * it's in use. The hw state code will make sure we get this right. */ 6654 if (old_crtc_state->pch_pfit.enabled) { 6655 I915_WRITE(PF_CTL(pipe), 0); 6656 I915_WRITE(PF_WIN_POS(pipe), 0); 6657 I915_WRITE(PF_WIN_SZ(pipe), 0); 6658 } 6659 } 6660 6661 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, 6662 struct intel_atomic_state *state) 6663 { 6664 struct drm_crtc *crtc = old_crtc_state->base.crtc; 6665 struct drm_device *dev = crtc->dev; 6666 struct drm_i915_private *dev_priv = to_i915(dev); 6667 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6668 enum pipe pipe = intel_crtc->pipe; 6669 6670 /* 6671 * Sometimes spurious CPU pipe underruns happen when the 6672 * pipe is already disabled, but FDI RX/TX is still enabled. 6673 * Happens at least with VGA+HDMI cloning. Suppress them. 
6674 */ 6675 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6676 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 6677 6678 intel_encoders_disable(intel_crtc, old_crtc_state, state); 6679 6680 drm_crtc_vblank_off(crtc); 6681 assert_vblank_disabled(crtc); 6682 6683 intel_disable_pipe(old_crtc_state); 6684 6685 ironlake_pfit_disable(old_crtc_state); 6686 6687 if (old_crtc_state->has_pch_encoder) 6688 ironlake_fdi_disable(crtc); 6689 6690 intel_encoders_post_disable(intel_crtc, old_crtc_state, state); 6691 6692 if (old_crtc_state->has_pch_encoder) { 6693 ironlake_disable_pch_transcoder(dev_priv, pipe); 6694 6695 if (HAS_PCH_CPT(dev_priv)) { 6696 i915_reg_t reg; 6697 u32 temp; 6698 6699 /* disable TRANS_DP_CTL */ 6700 reg = TRANS_DP_CTL(pipe); 6701 temp = I915_READ(reg); 6702 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 6703 TRANS_DP_PORT_SEL_MASK); 6704 temp |= TRANS_DP_PORT_SEL_NONE; 6705 I915_WRITE(reg, temp); 6706 6707 /* disable DPLL_SEL */ 6708 temp = I915_READ(PCH_DPLL_SEL); 6709 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 6710 I915_WRITE(PCH_DPLL_SEL, temp); 6711 } 6712 6713 ironlake_fdi_pll_disable(intel_crtc); 6714 } 6715 6716 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6717 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 6718 } 6719 6720 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, 6721 struct intel_atomic_state *state) 6722 { 6723 struct drm_crtc *crtc = old_crtc_state->base.crtc; 6724 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 6725 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6726 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 6727 6728 intel_encoders_disable(intel_crtc, old_crtc_state, state); 6729 6730 drm_crtc_vblank_off(crtc); 6731 assert_vblank_disabled(crtc); 6732 6733 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 6734 if (!transcoder_is_dsi(cpu_transcoder)) 6735 intel_disable_pipe(old_crtc_state); 6736 6737 if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) 6738 intel_ddi_set_vc_payload_alloc(old_crtc_state, false); 6739 6740 if (INTEL_GEN(dev_priv) >= 11) 6741 icl_disable_transcoder_port_sync(old_crtc_state); 6742 6743 if (!transcoder_is_dsi(cpu_transcoder)) 6744 intel_ddi_disable_transcoder_func(old_crtc_state); 6745 6746 intel_dsc_disable(old_crtc_state); 6747 6748 if (INTEL_GEN(dev_priv) >= 9) 6749 skylake_scaler_disable(intel_crtc); 6750 else 6751 ironlake_pfit_disable(old_crtc_state); 6752 6753 intel_encoders_post_disable(intel_crtc, old_crtc_state, state); 6754 6755 intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); 6756 } 6757 6758 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) 6759 { 6760 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6761 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6762 6763 if (!crtc_state->gmch_pfit.control) 6764 return; 6765 6766 /* 6767 * The panel fitter should only be adjusted whilst the pipe is disabled, 6768 * according to register description and PRM. 6769 */ 6770 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); 6771 assert_pipe_disabled(dev_priv, crtc->pipe); 6772 6773 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios); 6774 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control); 6775 6776 /* Border color in case we don't scale up to the full screen. Black by 6777 * default, change to something else for debugging. 
*/ 6778 I915_WRITE(BCLRPAT(crtc->pipe), 0); 6779 } 6780 6781 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) 6782 { 6783 if (phy == PHY_NONE) 6784 return false; 6785 6786 if (IS_ELKHARTLAKE(dev_priv)) 6787 return phy <= PHY_C; 6788 6789 if (INTEL_GEN(dev_priv) >= 11) 6790 return phy <= PHY_B; 6791 6792 return false; 6793 } 6794 6795 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) 6796 { 6797 if (INTEL_GEN(dev_priv) >= 12) 6798 return phy >= PHY_D && phy <= PHY_I; 6799 6800 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) 6801 return phy >= PHY_C && phy <= PHY_F; 6802 6803 return false; 6804 } 6805 6806 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) 6807 { 6808 if (IS_ELKHARTLAKE(i915) && port == PORT_D) 6809 return PHY_A; 6810 6811 return (enum phy)port; 6812 } 6813 6814 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) 6815 { 6816 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) 6817 return PORT_TC_NONE; 6818 6819 if (INTEL_GEN(dev_priv) >= 12) 6820 return port - PORT_D; 6821 6822 return port - PORT_C; 6823 } 6824 6825 enum intel_display_power_domain intel_port_to_power_domain(enum port port) 6826 { 6827 switch (port) { 6828 case PORT_A: 6829 return POWER_DOMAIN_PORT_DDI_A_LANES; 6830 case PORT_B: 6831 return POWER_DOMAIN_PORT_DDI_B_LANES; 6832 case PORT_C: 6833 return POWER_DOMAIN_PORT_DDI_C_LANES; 6834 case PORT_D: 6835 return POWER_DOMAIN_PORT_DDI_D_LANES; 6836 case PORT_E: 6837 return POWER_DOMAIN_PORT_DDI_E_LANES; 6838 case PORT_F: 6839 return POWER_DOMAIN_PORT_DDI_F_LANES; 6840 case PORT_G: 6841 return POWER_DOMAIN_PORT_DDI_G_LANES; 6842 default: 6843 MISSING_CASE(port); 6844 return POWER_DOMAIN_PORT_OTHER; 6845 } 6846 } 6847 6848 enum intel_display_power_domain 6849 intel_aux_power_domain(struct intel_digital_port *dig_port) 6850 { 6851 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 6852 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 6853 6854 if (intel_phy_is_tc(dev_priv, phy) && 6855 dig_port->tc_mode == TC_PORT_TBT_ALT) { 6856 switch (dig_port->aux_ch) { 6857 case AUX_CH_C: 6858 return POWER_DOMAIN_AUX_C_TBT; 6859 case AUX_CH_D: 6860 return POWER_DOMAIN_AUX_D_TBT; 6861 case AUX_CH_E: 6862 return POWER_DOMAIN_AUX_E_TBT; 6863 case AUX_CH_F: 6864 return POWER_DOMAIN_AUX_F_TBT; 6865 case AUX_CH_G: 6866 return POWER_DOMAIN_AUX_G_TBT; 6867 default: 6868 MISSING_CASE(dig_port->aux_ch); 6869 return POWER_DOMAIN_AUX_C_TBT; 6870 } 6871 } 6872 6873 switch (dig_port->aux_ch) { 6874 case AUX_CH_A: 6875 return POWER_DOMAIN_AUX_A; 6876 case AUX_CH_B: 6877 return POWER_DOMAIN_AUX_B; 6878 case AUX_CH_C: 6879 return POWER_DOMAIN_AUX_C; 6880 case AUX_CH_D: 6881 return POWER_DOMAIN_AUX_D; 6882 case AUX_CH_E: 6883 return POWER_DOMAIN_AUX_E; 6884 case AUX_CH_F: 6885 return POWER_DOMAIN_AUX_F; 6886 case AUX_CH_G: 6887 return POWER_DOMAIN_AUX_G; 6888 default: 6889 MISSING_CASE(dig_port->aux_ch); 6890 return POWER_DOMAIN_AUX_A; 6891 } 6892 } 6893 6894 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) 6895 { 6896 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6897 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6898 struct drm_encoder *encoder; 6899 enum pipe pipe = crtc->pipe; 6900 u64 mask; 6901 enum transcoder transcoder = crtc_state->cpu_transcoder; 6902 6903 if (!crtc_state->base.active) 6904 return 0; 6905 6906 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe)); 6907 mask |= 
BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder)); 6908 if (crtc_state->pch_pfit.enabled || 6909 crtc_state->pch_pfit.force_thru) 6910 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 6911 6912 drm_for_each_encoder_mask(encoder, &dev_priv->drm, 6913 crtc_state->base.encoder_mask) { 6914 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 6915 6916 mask |= BIT_ULL(intel_encoder->power_domain); 6917 } 6918 6919 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 6920 mask |= BIT_ULL(POWER_DOMAIN_AUDIO); 6921 6922 if (crtc_state->shared_dpll) 6923 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE); 6924 6925 return mask; 6926 } 6927 6928 static u64 6929 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state) 6930 { 6931 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6932 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6933 enum intel_display_power_domain domain; 6934 u64 domains, new_domains, old_domains; 6935 6936 old_domains = crtc->enabled_power_domains; 6937 crtc->enabled_power_domains = new_domains = 6938 get_crtc_power_domains(crtc_state); 6939 6940 domains = new_domains & ~old_domains; 6941 6942 for_each_power_domain(domain, domains) 6943 intel_display_power_get(dev_priv, domain); 6944 6945 return old_domains & ~new_domains; 6946 } 6947 6948 static void modeset_put_power_domains(struct drm_i915_private *dev_priv, 6949 u64 domains) 6950 { 6951 enum intel_display_power_domain domain; 6952 6953 for_each_power_domain(domain, domains) 6954 intel_display_power_put_unchecked(dev_priv, domain); 6955 } 6956 6957 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, 6958 struct intel_atomic_state *state) 6959 { 6960 struct drm_crtc *crtc = pipe_config->base.crtc; 6961 struct drm_device *dev = crtc->dev; 6962 struct drm_i915_private *dev_priv = to_i915(dev); 6963 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6964 enum pipe pipe = intel_crtc->pipe; 6965 6966 if (WARN_ON(intel_crtc->active)) 6967 return; 6968 6969 if (intel_crtc_has_dp_encoder(pipe_config)) 6970 intel_dp_set_m_n(pipe_config, M1_N1); 6971 6972 intel_set_pipe_timings(pipe_config); 6973 intel_set_pipe_src_size(pipe_config); 6974 6975 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 6976 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 6977 I915_WRITE(CHV_CANVAS(pipe), 0); 6978 } 6979 6980 i9xx_set_pipeconf(pipe_config); 6981 6982 intel_crtc->active = true; 6983 6984 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6985 6986 intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); 6987 6988 if (IS_CHERRYVIEW(dev_priv)) { 6989 chv_prepare_pll(intel_crtc, pipe_config); 6990 chv_enable_pll(intel_crtc, pipe_config); 6991 } else { 6992 vlv_prepare_pll(intel_crtc, pipe_config); 6993 vlv_enable_pll(intel_crtc, pipe_config); 6994 } 6995 6996 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6997 6998 i9xx_pfit_enable(pipe_config); 6999 7000 intel_color_load_luts(pipe_config); 7001 intel_color_commit(pipe_config); 7002 /* update DSPCNTR to configure gamma for pipe bottom color */ 7003 intel_disable_primary_plane(pipe_config); 7004 7005 dev_priv->display.initial_watermarks(state, pipe_config); 7006 intel_enable_pipe(pipe_config); 7007 7008 assert_vblank_disabled(crtc); 7009 intel_crtc_vblank_on(pipe_config); 7010 7011 intel_encoders_enable(intel_crtc, pipe_config, state); 7012 } 7013 7014 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) 7015 { 7016 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 7017 struct 
drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7018 7019 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0); 7020 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1); 7021 } 7022 7023 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, 7024 struct intel_atomic_state *state) 7025 { 7026 struct drm_crtc *crtc = pipe_config->base.crtc; 7027 struct drm_device *dev = crtc->dev; 7028 struct drm_i915_private *dev_priv = to_i915(dev); 7029 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7030 enum pipe pipe = intel_crtc->pipe; 7031 7032 if (WARN_ON(intel_crtc->active)) 7033 return; 7034 7035 i9xx_set_pll_dividers(pipe_config); 7036 7037 if (intel_crtc_has_dp_encoder(pipe_config)) 7038 intel_dp_set_m_n(pipe_config, M1_N1); 7039 7040 intel_set_pipe_timings(pipe_config); 7041 intel_set_pipe_src_size(pipe_config); 7042 7043 i9xx_set_pipeconf(pipe_config); 7044 7045 intel_crtc->active = true; 7046 7047 if (!IS_GEN(dev_priv, 2)) 7048 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 7049 7050 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 7051 7052 i9xx_enable_pll(intel_crtc, pipe_config); 7053 7054 i9xx_pfit_enable(pipe_config); 7055 7056 intel_color_load_luts(pipe_config); 7057 intel_color_commit(pipe_config); 7058 /* update DSPCNTR to configure gamma for pipe bottom color */ 7059 intel_disable_primary_plane(pipe_config); 7060 7061 if (dev_priv->display.initial_watermarks != NULL) 7062 dev_priv->display.initial_watermarks(state, 7063 pipe_config); 7064 else 7065 intel_update_watermarks(intel_crtc); 7066 intel_enable_pipe(pipe_config); 7067 7068 assert_vblank_disabled(crtc); 7069 intel_crtc_vblank_on(pipe_config); 7070 7071 intel_encoders_enable(intel_crtc, pipe_config, state); 7072 } 7073 7074 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) 7075 { 7076 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 7077 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7078 7079 if (!old_crtc_state->gmch_pfit.control) 7080 return; 7081 7082 assert_pipe_disabled(dev_priv, crtc->pipe); 7083 7084 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n", 7085 I915_READ(PFIT_CONTROL)); 7086 I915_WRITE(PFIT_CONTROL, 0); 7087 } 7088 7089 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, 7090 struct intel_atomic_state *state) 7091 { 7092 struct drm_crtc *crtc = old_crtc_state->base.crtc; 7093 struct drm_device *dev = crtc->dev; 7094 struct drm_i915_private *dev_priv = to_i915(dev); 7095 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7096 enum pipe pipe = intel_crtc->pipe; 7097 7098 /* 7099 * On gen2 planes are double buffered but the pipe isn't, so we must 7100 * wait for planes to fully turn off before disabling the pipe. 
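* (I.e. a plane disable only latches at the next vblank, while the pipe
 * disable takes effect immediately; the vblank wait below keeps the two
 * ordered.)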
7101 */ 7102 if (IS_GEN(dev_priv, 2)) 7103 intel_wait_for_vblank(dev_priv, pipe); 7104 7105 intel_encoders_disable(intel_crtc, old_crtc_state, state); 7106 7107 drm_crtc_vblank_off(crtc); 7108 assert_vblank_disabled(crtc); 7109 7110 intel_disable_pipe(old_crtc_state); 7111 7112 i9xx_pfit_disable(old_crtc_state); 7113 7114 intel_encoders_post_disable(intel_crtc, old_crtc_state, state); 7115 7116 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { 7117 if (IS_CHERRYVIEW(dev_priv)) 7118 chv_disable_pll(dev_priv, pipe); 7119 else if (IS_VALLEYVIEW(dev_priv)) 7120 vlv_disable_pll(dev_priv, pipe); 7121 else 7122 i9xx_disable_pll(old_crtc_state); 7123 } 7124 7125 intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); 7126 7127 if (!IS_GEN(dev_priv, 2)) 7128 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 7129 7130 if (!dev_priv->display.initial_watermarks) 7131 intel_update_watermarks(intel_crtc); 7132 7133 /* clock the pipe down to 640x480@60 to potentially save power */ 7134 if (IS_I830(dev_priv)) 7135 i830_enable_pipe(dev_priv, pipe); 7136 } 7137 7138 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, 7139 struct drm_modeset_acquire_ctx *ctx) 7140 { 7141 struct intel_encoder *encoder; 7142 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7143 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 7144 struct intel_bw_state *bw_state = 7145 to_intel_bw_state(dev_priv->bw_obj.state); 7146 enum intel_display_power_domain domain; 7147 struct intel_plane *plane; 7148 u64 domains; 7149 struct drm_atomic_state *state; 7150 struct intel_crtc_state *crtc_state; 7151 int ret; 7152 7153 if (!intel_crtc->active) 7154 return; 7155 7156 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) { 7157 const struct intel_plane_state *plane_state = 7158 to_intel_plane_state(plane->base.state); 7159 7160 if (plane_state->base.visible) 7161 intel_plane_disable_noatomic(intel_crtc, plane); 7162 } 7163 7164 state = drm_atomic_state_alloc(crtc->dev); 7165 if (!state) { 7166 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", 7167 crtc->base.id, crtc->name); 7168 return; 7169 } 7170 7171 state->acquire_ctx = ctx; 7172 7173 /* Everything's already locked, -EDEADLK can't happen. 
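* (state->acquire_ctx was seeded with the caller's ctx above, so every lock
 * this atomic state needs should already be held, which is presumably why
 * the error paths below merely WARN rather than retry.)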
*/ 7174 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 7175 ret = drm_atomic_add_affected_connectors(state, crtc); 7176 7177 WARN_ON(IS_ERR(crtc_state) || ret); 7178 7179 dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state)); 7180 7181 drm_atomic_state_put(state); 7182 7183 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", 7184 crtc->base.id, crtc->name); 7185 7186 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); 7187 crtc->state->active = false; 7188 intel_crtc->active = false; 7189 crtc->enabled = false; 7190 crtc->state->connector_mask = 0; 7191 crtc->state->encoder_mask = 0; 7192 7193 for_each_encoder_on_crtc(crtc->dev, crtc, encoder) 7194 encoder->base.crtc = NULL; 7195 7196 intel_fbc_disable(intel_crtc); 7197 intel_update_watermarks(intel_crtc); 7198 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state)); 7199 7200 domains = intel_crtc->enabled_power_domains; 7201 for_each_power_domain(domain, domains) 7202 intel_display_power_put_unchecked(dev_priv, domain); 7203 intel_crtc->enabled_power_domains = 0; 7204 7205 dev_priv->active_pipes &= ~BIT(intel_crtc->pipe); 7206 dev_priv->min_cdclk[intel_crtc->pipe] = 0; 7207 dev_priv->min_voltage_level[intel_crtc->pipe] = 0; 7208 7209 bw_state->data_rate[intel_crtc->pipe] = 0; 7210 bw_state->num_active_planes[intel_crtc->pipe] = 0; 7211 } 7212 7213 /* 7214 * turn all CRTCs off, but do not adjust state. 7215 * This has to be paired with a call to intel_modeset_setup_hw_state. 7216 */ 7217 int intel_display_suspend(struct drm_device *dev) 7218 { 7219 struct drm_i915_private *dev_priv = to_i915(dev); 7220 struct drm_atomic_state *state; 7221 int ret; 7222 7223 state = drm_atomic_helper_suspend(dev); 7224 ret = PTR_ERR_OR_ZERO(state); 7225 if (ret) 7226 DRM_ERROR("Suspending CRTCs failed with %i\n", ret); 7227 else 7228 dev_priv->modeset_restore_state = state; 7229 return ret; 7230 } 7231 7232 void intel_encoder_destroy(struct drm_encoder *encoder) 7233 { 7234 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 7235 7236 drm_encoder_cleanup(encoder); 7237 kfree(intel_encoder); 7238 } 7239 7240 /* Cross check the actual hw state with our own modeset state tracking (and its 7241 * internal consistency).
*/ 7242 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, 7243 struct drm_connector_state *conn_state) 7244 { 7245 struct intel_connector *connector = to_intel_connector(conn_state->connector); 7246 7247 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 7248 connector->base.base.id, 7249 connector->base.name); 7250 7251 if (connector->get_hw_state(connector)) { 7252 struct intel_encoder *encoder = connector->encoder; 7253 7254 I915_STATE_WARN(!crtc_state, 7255 "connector enabled without attached crtc\n"); 7256 7257 if (!crtc_state) 7258 return; 7259 7260 I915_STATE_WARN(!crtc_state->base.active, 7261 "connector is active, but attached crtc isn't\n"); 7262 7263 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) 7264 return; 7265 7266 I915_STATE_WARN(conn_state->best_encoder != &encoder->base, 7267 "atomic encoder doesn't match attached encoder\n"); 7268 7269 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, 7270 "attached encoder crtc differs from connector crtc\n"); 7271 } else { 7272 I915_STATE_WARN(crtc_state && crtc_state->base.active, 7273 "attached crtc is active, but connector isn't\n"); 7274 I915_STATE_WARN(!crtc_state && conn_state->best_encoder, 7275 "best encoder set without crtc!\n"); 7276 } 7277 } 7278 7279 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 7280 { 7281 if (crtc_state->base.enable && crtc_state->has_pch_encoder) 7282 return crtc_state->fdi_lanes; 7283 7284 return 0; 7285 } 7286 7287 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, 7288 struct intel_crtc_state *pipe_config) 7289 { 7290 struct drm_i915_private *dev_priv = to_i915(dev); 7291 struct drm_atomic_state *state = pipe_config->base.state; 7292 struct intel_crtc *other_crtc; 7293 struct intel_crtc_state *other_crtc_state; 7294 7295 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", 7296 pipe_name(pipe), pipe_config->fdi_lanes); 7297 if (pipe_config->fdi_lanes > 4) { 7298 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n", 7299 pipe_name(pipe), pipe_config->fdi_lanes); 7300 return -EINVAL; 7301 } 7302 7303 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 7304 if (pipe_config->fdi_lanes > 2) { 7305 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", 7306 pipe_config->fdi_lanes); 7307 return -EINVAL; 7308 } else { 7309 return 0; 7310 } 7311 } 7312 7313 if (INTEL_NUM_PIPES(dev_priv) == 2) 7314 return 0; 7315 7316 /* Ivybridge 3 pipe is really complicated */ 7317 switch (pipe) { 7318 case PIPE_A: 7319 return 0; 7320 case PIPE_B: 7321 if (pipe_config->fdi_lanes <= 2) 7322 return 0; 7323 7324 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C); 7325 other_crtc_state = 7326 intel_atomic_get_crtc_state(state, other_crtc); 7327 if (IS_ERR(other_crtc_state)) 7328 return PTR_ERR(other_crtc_state); 7329 7330 if (pipe_required_fdi_lanes(other_crtc_state) > 0) { 7331 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", 7332 pipe_name(pipe), pipe_config->fdi_lanes); 7333 return -EINVAL; 7334 } 7335 return 0; 7336 case PIPE_C: 7337 if (pipe_config->fdi_lanes > 2) { 7338 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", 7339 pipe_name(pipe), pipe_config->fdi_lanes); 7340 return -EINVAL; 7341 } 7342 7343 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B); 7344 other_crtc_state = 7345 intel_atomic_get_crtc_state(state, other_crtc); 7346 if (IS_ERR(other_crtc_state)) 7347 return PTR_ERR(other_crtc_state); 7348 7349 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 7350 
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); 7351 return -EINVAL; 7352 } 7353 return 0; 7354 default: 7355 BUG(); 7356 } 7357 } 7358 7359 #define RETRY 1 7360 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, 7361 struct intel_crtc_state *pipe_config) 7362 { 7363 struct drm_device *dev = intel_crtc->base.dev; 7364 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 7365 int lane, link_bw, fdi_dotclock, ret; 7366 bool needs_recompute = false; 7367 7368 retry: 7369 /* FDI is a binary signal running at ~2.7GHz, encoding 7370 * each output octet as 10 bits. The actual frequency 7371 * is stored as a divider into a 100MHz clock, and the 7372 * mode pixel clock is stored in units of 1KHz. 7373 * Hence the bw of each lane in terms of the mode signal 7374 * is: 7375 */ 7376 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config); 7377 7378 fdi_dotclock = adjusted_mode->crtc_clock; 7379 7380 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 7381 pipe_config->pipe_bpp); 7382 7383 pipe_config->fdi_lanes = lane; 7384 7385 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 7386 link_bw, &pipe_config->fdi_m_n, false, false); 7387 7388 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 7389 if (ret == -EDEADLK) 7390 return ret; 7391 7392 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 7393 pipe_config->pipe_bpp -= 2*3; 7394 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", 7395 pipe_config->pipe_bpp); 7396 needs_recompute = true; 7397 pipe_config->bw_constrained = true; 7398 7399 goto retry; 7400 } 7401 7402 if (needs_recompute) 7403 return RETRY; 7404 7405 return ret; 7406 } 7407 7408 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) 7409 { 7410 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 7411 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7412 7413 /* IPS only exists on ULT machines and is tied to pipe A. */ 7414 if (!hsw_crtc_supports_ips(crtc)) 7415 return false; 7416 7417 if (!i915_modparams.enable_ips) 7418 return false; 7419 7420 if (crtc_state->pipe_bpp > 24) 7421 return false; 7422 7423 /* 7424 * We compare against max which means we must take 7425 * the increased cdclk requirement into account when 7426 * calculating the new cdclk. 7427 * 7428 * Should measure whether using a lower cdclk w/o IPS 7429 */ 7430 if (IS_BROADWELL(dev_priv) && 7431 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100) 7432 return false; 7433 7434 return true; 7435 } 7436 7437 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state) 7438 { 7439 struct drm_i915_private *dev_priv = 7440 to_i915(crtc_state->base.crtc->dev); 7441 struct intel_atomic_state *intel_state = 7442 to_intel_atomic_state(crtc_state->base.state); 7443 7444 if (!hsw_crtc_state_ips_capable(crtc_state)) 7445 return false; 7446 7447 /* 7448 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 7449 * enabled and disabled dynamically based on package C states, 7450 * user space can't make reliable use of the CRCs, so let's just 7451 * completely disable it. 7452 */ 7453 if (crtc_state->crc_enabled) 7454 return false; 7455 7456 /* IPS should be fine as long as at least one plane is enabled. 
*/ 7457 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))) 7458 return false; 7459 7460 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 7461 if (IS_BROADWELL(dev_priv) && 7462 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100) 7463 return false; 7464 7465 return true; 7466 } 7467 7468 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 7469 { 7470 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7471 7472 /* GDG double wide on either pipe, otherwise pipe A only */ 7473 return INTEL_GEN(dev_priv) < 4 && 7474 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 7475 } 7476 7477 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) 7478 { 7479 u32 pixel_rate; 7480 7481 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock; 7482 7483 /* 7484 * We only use IF-ID interlacing. If we ever use 7485 * PF-ID we'll need to adjust the pixel_rate here. 7486 */ 7487 7488 if (pipe_config->pch_pfit.enabled) { 7489 u64 pipe_w, pipe_h, pfit_w, pfit_h; 7490 u32 pfit_size = pipe_config->pch_pfit.size; 7491 7492 pipe_w = pipe_config->pipe_src_w; 7493 pipe_h = pipe_config->pipe_src_h; 7494 7495 pfit_w = (pfit_size >> 16) & 0xFFFF; 7496 pfit_h = pfit_size & 0xFFFF; 7497 if (pipe_w < pfit_w) 7498 pipe_w = pfit_w; 7499 if (pipe_h < pfit_h) 7500 pipe_h = pfit_h; 7501 7502 if (WARN_ON(!pfit_w || !pfit_h)) 7503 return pixel_rate; 7504 7505 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h), 7506 pfit_w * pfit_h); 7507 } 7508 7509 return pixel_rate; 7510 } 7511 7512 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 7513 { 7514 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 7515 7516 if (HAS_GMCH(dev_priv)) 7517 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 7518 crtc_state->pixel_rate = 7519 crtc_state->base.adjusted_mode.crtc_clock; 7520 else 7521 crtc_state->pixel_rate = 7522 ilk_pipe_pixel_rate(crtc_state); 7523 } 7524 7525 static int intel_crtc_compute_config(struct intel_crtc *crtc, 7526 struct intel_crtc_state *pipe_config) 7527 { 7528 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7529 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 7530 int clock_limit = dev_priv->max_dotclk_freq; 7531 7532 if (INTEL_GEN(dev_priv) < 4) { 7533 clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 7534 7535 /* 7536 * Enable double wide mode when the dot clock 7537 * is > 90% of the (display) core speed. 7538 */ 7539 if (intel_crtc_supports_double_wide(crtc) && 7540 adjusted_mode->crtc_clock > clock_limit) { 7541 clock_limit = dev_priv->max_dotclk_freq; 7542 pipe_config->double_wide = true; 7543 } 7544 } 7545 7546 if (adjusted_mode->crtc_clock > clock_limit) { 7547 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 7548 adjusted_mode->crtc_clock, clock_limit, 7549 yesno(pipe_config->double_wide)); 7550 return -EINVAL; 7551 } 7552 7553 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 7554 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) && 7555 pipe_config->base.ctm) { 7556 /* 7557 * There is only one pipe CSC unit per pipe, and we need that 7558 * for output conversion from RGB->YCBCR. So if CTM is already 7559 * applied we can't support YCBCR420 output. 
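 * Userspace therefore has to pick one: either the CTM property or
 * YCbCr output.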
7560 */ 7561 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n"); 7562 return -EINVAL; 7563 } 7564 7565 /* 7566 * Pipe horizontal size must be even in: 7567 * - DVO ganged mode 7568 * - LVDS dual channel mode 7569 * - Double wide pipe 7570 */ 7571 if (pipe_config->pipe_src_w & 1) { 7572 if (pipe_config->double_wide) { 7573 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n"); 7574 return -EINVAL; 7575 } 7576 7577 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && 7578 intel_is_dual_link_lvds(dev_priv)) { 7579 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n"); 7580 return -EINVAL; 7581 } 7582 } 7583 7584 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 7585 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 7586 */ 7587 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) && 7588 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 7589 return -EINVAL; 7590 7591 intel_crtc_compute_pixel_rate(pipe_config); 7592 7593 if (pipe_config->has_pch_encoder) 7594 return ironlake_fdi_compute_config(crtc, pipe_config); 7595 7596 return 0; 7597 } 7598 7599 static void 7600 intel_reduce_m_n_ratio(u32 *num, u32 *den) 7601 { 7602 while (*num > DATA_LINK_M_N_MASK || 7603 *den > DATA_LINK_M_N_MASK) { 7604 *num >>= 1; 7605 *den >>= 1; 7606 } 7607 } 7608 7609 static void compute_m_n(unsigned int m, unsigned int n, 7610 u32 *ret_m, u32 *ret_n, 7611 bool constant_n) 7612 { 7613 /* 7614 * Several DP dongles in particular seem to be fussy about 7615 * too large link M/N values. Give N value as 0x8000 that 7616 * should be acceptable by specific devices. 0x8000 is the 7617 * specified fixed N value for asynchronous clock mode, 7618 * which the devices expect also in synchronous clock mode. 7619 */ 7620 if (constant_n) 7621 *ret_n = 0x8000; 7622 else 7623 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 7624 7625 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); 7626 intel_reduce_m_n_ratio(ret_m, ret_n); 7627 } 7628 7629 void 7630 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, 7631 int pixel_clock, int link_clock, 7632 struct intel_link_m_n *m_n, 7633 bool constant_n, bool fec_enable) 7634 { 7635 u32 data_clock = bits_per_pixel * pixel_clock; 7636 7637 if (fec_enable) 7638 data_clock = intel_dp_mode_to_fec_clock(data_clock); 7639 7640 m_n->tu = 64; 7641 compute_m_n(data_clock, 7642 link_clock * nlanes * 8, 7643 &m_n->gmch_m, &m_n->gmch_n, 7644 constant_n); 7645 7646 compute_m_n(pixel_clock, link_clock, 7647 &m_n->link_m, &m_n->link_n, 7648 constant_n); 7649 } 7650 7651 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) 7652 { 7653 /* 7654 * There may be no VBT; and if the BIOS enabled SSC we can 7655 * just keep using it to avoid unnecessary flicker. Whereas if the 7656 * BIOS isn't using it, don't assume it will work even if the VBT 7657 * indicates as much. 
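 * In other words: trust the live hardware state (PCH_DREF_CONTROL)
 * over whatever the VBT claims.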
7658 */ 7659 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 7660 bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) & 7661 DREF_SSC1_ENABLE; 7662 7663 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 7664 DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n", 7665 enableddisabled(bios_lvds_use_ssc), 7666 enableddisabled(dev_priv->vbt.lvds_use_ssc)); 7667 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 7668 } 7669 } 7670 } 7671 7672 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 7673 { 7674 if (i915_modparams.panel_use_ssc >= 0) 7675 return i915_modparams.panel_use_ssc != 0; 7676 return dev_priv->vbt.lvds_use_ssc 7677 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 7678 } 7679 7680 static u32 pnv_dpll_compute_fp(struct dpll *dpll) 7681 { 7682 return (1 << dpll->n) << 16 | dpll->m2; 7683 } 7684 7685 static u32 i9xx_dpll_compute_fp(struct dpll *dpll) 7686 { 7687 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 7688 } 7689 7690 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 7691 struct intel_crtc_state *crtc_state, 7692 struct dpll *reduced_clock) 7693 { 7694 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7695 u32 fp, fp2 = 0; 7696 7697 if (IS_PINEVIEW(dev_priv)) { 7698 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 7699 if (reduced_clock) 7700 fp2 = pnv_dpll_compute_fp(reduced_clock); 7701 } else { 7702 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 7703 if (reduced_clock) 7704 fp2 = i9xx_dpll_compute_fp(reduced_clock); 7705 } 7706 7707 crtc_state->dpll_hw_state.fp0 = fp; 7708 7709 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 7710 reduced_clock) { 7711 crtc_state->dpll_hw_state.fp1 = fp2; 7712 } else { 7713 crtc_state->dpll_hw_state.fp1 = fp; 7714 } 7715 } 7716 7717 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe 7718 pipe) 7719 { 7720 u32 reg_val; 7721 7722 /* 7723 * PLLB opamp always calibrates to max value of 0x3f, force enable it 7724 * and set it to a reasonable value instead. 
7725 */ 7726 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7727 reg_val &= 0xffffff00; 7728 reg_val |= 0x00000030; 7729 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7730 7731 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7732 reg_val &= 0x00ffffff; 7733 reg_val |= 0x8c000000; 7734 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7735 7736 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7737 reg_val &= 0xffffff00; 7738 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7739 7740 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7741 reg_val &= 0x00ffffff; 7742 reg_val |= 0xb0000000; 7743 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7744 } 7745 7746 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 7747 const struct intel_link_m_n *m_n) 7748 { 7749 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 7750 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7751 enum pipe pipe = crtc->pipe; 7752 7753 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7754 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 7755 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 7756 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 7757 } 7758 7759 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, 7760 enum transcoder transcoder) 7761 { 7762 if (IS_HASWELL(dev_priv)) 7763 return transcoder == TRANSCODER_EDP; 7764 7765 /* 7766 * Strictly speaking some registers are available before 7767 * gen7, but we only support DRRS on gen7+ 7768 */ 7769 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv); 7770 } 7771 7772 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 7773 const struct intel_link_m_n *m_n, 7774 const struct intel_link_m_n *m2_n2) 7775 { 7776 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 7777 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7778 enum pipe pipe = crtc->pipe; 7779 enum transcoder transcoder = crtc_state->cpu_transcoder; 7780 7781 if (INTEL_GEN(dev_priv) >= 5) { 7782 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 7783 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 7784 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 7785 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 7786 /* 7787 * M2_N2 registers are set only if DRRS is supported 7788 * (to make sure the registers are not unnecessarily accessed). 7789 */ 7790 if (m2_n2 && crtc_state->has_drrs && 7791 transcoder_has_m2_n2(dev_priv, transcoder)) { 7792 I915_WRITE(PIPE_DATA_M2(transcoder), 7793 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 7794 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 7795 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); 7796 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); 7797 } 7798 } else { 7799 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7800 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 7801 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); 7802 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); 7803 } 7804 } 7805 7806 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n) 7807 { 7808 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 7809 7810 if (m_n == M1_N1) { 7811 dp_m_n = &crtc_state->dp_m_n; 7812 dp_m2_n2 = &crtc_state->dp_m2_n2; 7813 } else if (m_n == M2_N2) { 7814 7815 /* 7816 * M2_N2 registers are not supported. Hence m2_n2 divider value 7817 * needs to be programmed into M1_N1. 
7818 */ 7819 dp_m_n = &crtc_state->dp_m2_n2; 7820 } else { 7821 DRM_ERROR("Unsupported divider value\n"); 7822 return; 7823 } 7824 7825 if (crtc_state->has_pch_encoder) 7826 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n); 7827 else 7828 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2); 7829 } 7830 7831 static void vlv_compute_dpll(struct intel_crtc *crtc, 7832 struct intel_crtc_state *pipe_config) 7833 { 7834 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 7835 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 7836 if (crtc->pipe != PIPE_A) 7837 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7838 7839 /* DPLL not used with DSI, but still need the rest set up */ 7840 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 7841 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | 7842 DPLL_EXT_BUFFER_ENABLE_VLV; 7843 7844 pipe_config->dpll_hw_state.dpll_md = 7845 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7846 } 7847 7848 static void chv_compute_dpll(struct intel_crtc *crtc, 7849 struct intel_crtc_state *pipe_config) 7850 { 7851 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | 7852 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 7853 if (crtc->pipe != PIPE_A) 7854 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7855 7856 /* DPLL not used with DSI, but still need the rest set up */ 7857 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 7858 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; 7859 7860 pipe_config->dpll_hw_state.dpll_md = 7861 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7862 } 7863 7864 static void vlv_prepare_pll(struct intel_crtc *crtc, 7865 const struct intel_crtc_state *pipe_config) 7866 { 7867 struct drm_device *dev = crtc->base.dev; 7868 struct drm_i915_private *dev_priv = to_i915(dev); 7869 enum pipe pipe = crtc->pipe; 7870 u32 mdiv; 7871 u32 bestn, bestm1, bestm2, bestp1, bestp2; 7872 u32 coreclk, reg_val; 7873 7874 /* Enable Refclk */ 7875 I915_WRITE(DPLL(pipe), 7876 pipe_config->dpll_hw_state.dpll & 7877 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); 7878 7879 /* No need to actually set up the DPLL with DSI */ 7880 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7881 return; 7882 7883 vlv_dpio_get(dev_priv); 7884 7885 bestn = pipe_config->dpll.n; 7886 bestm1 = pipe_config->dpll.m1; 7887 bestm2 = pipe_config->dpll.m2; 7888 bestp1 = pipe_config->dpll.p1; 7889 bestp2 = pipe_config->dpll.p2; 7890 7891 /* See eDP HDMI DPIO driver vbios notes doc */ 7892 7893 /* PLL B needs special handling */ 7894 if (pipe == PIPE_B) 7895 vlv_pllb_recal_opamp(dev_priv, pipe); 7896 7897 /* Set up Tx target for periodic Rcomp update */ 7898 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 7899 7900 /* Disable target IRef on PLL */ 7901 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 7902 reg_val &= 0x00ffffff; 7903 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 7904 7905 /* Disable fast lock */ 7906 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 7907 7908 /* Set idtafcrecal before PLL is enabled */ 7909 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 7910 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 7911 mdiv |= ((bestn << DPIO_N_SHIFT)); 7912 mdiv |= (1 << DPIO_K_SHIFT); 7913 7914 /* 7915 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 7916 * but we don't support that). 7917 * Note: don't use the DAC post divider as it seems unstable. 
7918 */ 7919 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 7920 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7921 7922 mdiv |= DPIO_ENABLE_CALIBRATION; 7923 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7924 7925 /* Set HBR and RBR LPF coefficients */ 7926 if (pipe_config->port_clock == 162000 || 7927 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) || 7928 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) 7929 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7930 0x009f0003); 7931 else 7932 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7933 0x00d0000f); 7934 7935 if (intel_crtc_has_dp_encoder(pipe_config)) { 7936 /* Use SSC source */ 7937 if (pipe == PIPE_A) 7938 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7939 0x0df40000); 7940 else 7941 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7942 0x0df70000); 7943 } else { /* HDMI or VGA */ 7944 /* Use bend source */ 7945 if (pipe == PIPE_A) 7946 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7947 0x0df70000); 7948 else 7949 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7950 0x0df40000); 7951 } 7952 7953 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 7954 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 7955 if (intel_crtc_has_dp_encoder(pipe_config)) 7956 coreclk |= 0x01000000; 7957 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 7958 7959 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 7960 7961 vlv_dpio_put(dev_priv); 7962 } 7963 7964 static void chv_prepare_pll(struct intel_crtc *crtc, 7965 const struct intel_crtc_state *pipe_config) 7966 { 7967 struct drm_device *dev = crtc->base.dev; 7968 struct drm_i915_private *dev_priv = to_i915(dev); 7969 enum pipe pipe = crtc->pipe; 7970 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7971 u32 loopfilter, tribuf_calcntr; 7972 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 7973 u32 dpio_val; 7974 int vco; 7975 7976 /* Enable Refclk and SSC */ 7977 I915_WRITE(DPLL(pipe), 7978 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 7979 7980 /* No need to actually set up the DPLL with DSI */ 7981 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7982 return; 7983 7984 bestn = pipe_config->dpll.n; 7985 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 7986 bestm1 = pipe_config->dpll.m1; 7987 bestm2 = pipe_config->dpll.m2 >> 22; 7988 bestp1 = pipe_config->dpll.p1; 7989 bestp2 = pipe_config->dpll.p2; 7990 vco = pipe_config->dpll.vco; 7991 dpio_val = 0; 7992 loopfilter = 0; 7993 7994 vlv_dpio_get(dev_priv); 7995 7996 /* p1 and p2 divider */ 7997 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 7998 5 << DPIO_CHV_S1_DIV_SHIFT | 7999 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 8000 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 8001 1 << DPIO_CHV_K_DIV_SHIFT); 8002 8003 /* Feedback post-divider - m2 */ 8004 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 8005 8006 /* Feedback refclk divider - n and m1 */ 8007 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 8008 DPIO_CHV_M1_DIV_BY_2 | 8009 1 << DPIO_CHV_N_DIV_SHIFT); 8010 8011 /* M2 fraction division */ 8012 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 8013 8014 /* M2 fraction division enable */ 8015 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 8016 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 8017 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 8018 if (bestm2_frac) 8019 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 8020 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 8021 8022 /* Program 
digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
		       vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
		       DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}

/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
		     const struct dpll *dpll)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_crtc_state *pipe_config;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config)
		return -ENOMEM;

	pipe_config->base.crtc = &crtc->base;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll = *dpll;

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_compute_dpll(crtc, pipe_config);
		chv_prepare_pll(crtc, pipe_config);
		chv_enable_pll(crtc, pipe_config);
	} else {
		vlv_compute_dpll(crtc, pipe_config);
		vlv_prepare_pll(crtc, pipe_config);
		vlv_enable_pll(crtc, pipe_config);
	}

	kfree(pipe_config);

	return 0;
}

/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. Counterpart to vlv_force_pll_on(); to be
 * used in cases where the PLL had been force-enabled even though @pipe
 * itself was not going to be enabled.
8115 */ 8116 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) 8117 { 8118 if (IS_CHERRYVIEW(dev_priv)) 8119 chv_disable_pll(dev_priv, pipe); 8120 else 8121 vlv_disable_pll(dev_priv, pipe); 8122 } 8123 8124 static void i9xx_compute_dpll(struct intel_crtc *crtc, 8125 struct intel_crtc_state *crtc_state, 8126 struct dpll *reduced_clock) 8127 { 8128 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8129 u32 dpll; 8130 struct dpll *clock = &crtc_state->dpll; 8131 8132 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8133 8134 dpll = DPLL_VGA_MODE_DIS; 8135 8136 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 8137 dpll |= DPLLB_MODE_LVDS; 8138 else 8139 dpll |= DPLLB_MODE_DAC_SERIAL; 8140 8141 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 8142 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 8143 dpll |= (crtc_state->pixel_multiplier - 1) 8144 << SDVO_MULTIPLIER_SHIFT_HIRES; 8145 } 8146 8147 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 8148 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 8149 dpll |= DPLL_SDVO_HIGH_SPEED; 8150 8151 if (intel_crtc_has_dp_encoder(crtc_state)) 8152 dpll |= DPLL_SDVO_HIGH_SPEED; 8153 8154 /* compute bitmask from p1 value */ 8155 if (IS_PINEVIEW(dev_priv)) 8156 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 8157 else { 8158 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8159 if (IS_G4X(dev_priv) && reduced_clock) 8160 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 8161 } 8162 switch (clock->p2) { 8163 case 5: 8164 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 8165 break; 8166 case 7: 8167 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 8168 break; 8169 case 10: 8170 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 8171 break; 8172 case 14: 8173 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 8174 break; 8175 } 8176 if (INTEL_GEN(dev_priv) >= 4) 8177 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 8178 8179 if (crtc_state->sdvo_tv_clock) 8180 dpll |= PLL_REF_INPUT_TVCLKINBC; 8181 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8182 intel_panel_use_ssc(dev_priv)) 8183 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8184 else 8185 dpll |= PLL_REF_INPUT_DREFCLK; 8186 8187 dpll |= DPLL_VCO_ENABLE; 8188 crtc_state->dpll_hw_state.dpll = dpll; 8189 8190 if (INTEL_GEN(dev_priv) >= 4) { 8191 u32 dpll_md = (crtc_state->pixel_multiplier - 1) 8192 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 8193 crtc_state->dpll_hw_state.dpll_md = dpll_md; 8194 } 8195 } 8196 8197 static void i8xx_compute_dpll(struct intel_crtc *crtc, 8198 struct intel_crtc_state *crtc_state, 8199 struct dpll *reduced_clock) 8200 { 8201 struct drm_device *dev = crtc->base.dev; 8202 struct drm_i915_private *dev_priv = to_i915(dev); 8203 u32 dpll; 8204 struct dpll *clock = &crtc_state->dpll; 8205 8206 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8207 8208 dpll = DPLL_VGA_MODE_DIS; 8209 8210 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8211 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8212 } else { 8213 if (clock->p1 == 2) 8214 dpll |= PLL_P1_DIVIDE_BY_TWO; 8215 else 8216 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8217 if (clock->p2 == 4) 8218 dpll |= PLL_P2_DIVIDE_BY_4; 8219 } 8220 8221 /* 8222 * Bspec: 8223 * "[Almador Errata}: For the correct operation of the muxed DVO pins 8224 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data, 8225 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock 8226 * Enable) must be set to “1” in both the DPLL A Control 
Register
	 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity we just keep both bits always enabled in
	 * both DPLLs. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}

static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits.
*/ 8303 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 8304 (pipe == PIPE_B || pipe == PIPE_C)) 8305 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 8306 8307 } 8308 8309 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 8310 { 8311 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8312 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8313 enum pipe pipe = crtc->pipe; 8314 8315 /* pipesrc controls the size that is scaled from, which should 8316 * always be the user's requested size. 8317 */ 8318 I915_WRITE(PIPESRC(pipe), 8319 ((crtc_state->pipe_src_w - 1) << 16) | 8320 (crtc_state->pipe_src_h - 1)); 8321 } 8322 8323 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) 8324 { 8325 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 8326 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 8327 8328 if (IS_GEN(dev_priv, 2)) 8329 return false; 8330 8331 if (INTEL_GEN(dev_priv) >= 9 || 8332 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 8333 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; 8334 else 8335 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; 8336 } 8337 8338 static void intel_get_pipe_timings(struct intel_crtc *crtc, 8339 struct intel_crtc_state *pipe_config) 8340 { 8341 struct drm_device *dev = crtc->base.dev; 8342 struct drm_i915_private *dev_priv = to_i915(dev); 8343 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 8344 u32 tmp; 8345 8346 tmp = I915_READ(HTOTAL(cpu_transcoder)); 8347 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 8348 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 8349 8350 if (!transcoder_is_dsi(cpu_transcoder)) { 8351 tmp = I915_READ(HBLANK(cpu_transcoder)); 8352 pipe_config->base.adjusted_mode.crtc_hblank_start = 8353 (tmp & 0xffff) + 1; 8354 pipe_config->base.adjusted_mode.crtc_hblank_end = 8355 ((tmp >> 16) & 0xffff) + 1; 8356 } 8357 tmp = I915_READ(HSYNC(cpu_transcoder)); 8358 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 8359 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 8360 8361 tmp = I915_READ(VTOTAL(cpu_transcoder)); 8362 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 8363 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 8364 8365 if (!transcoder_is_dsi(cpu_transcoder)) { 8366 tmp = I915_READ(VBLANK(cpu_transcoder)); 8367 pipe_config->base.adjusted_mode.crtc_vblank_start = 8368 (tmp & 0xffff) + 1; 8369 pipe_config->base.adjusted_mode.crtc_vblank_end = 8370 ((tmp >> 16) & 0xffff) + 1; 8371 } 8372 tmp = I915_READ(VSYNC(cpu_transcoder)); 8373 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 8374 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 8375 8376 if (intel_pipe_is_interlaced(pipe_config)) { 8377 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 8378 pipe_config->base.adjusted_mode.crtc_vtotal += 1; 8379 pipe_config->base.adjusted_mode.crtc_vblank_end += 1; 8380 } 8381 } 8382 8383 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 8384 struct intel_crtc_state *pipe_config) 8385 { 8386 struct drm_device *dev = crtc->base.dev; 8387 struct drm_i915_private *dev_priv = to_i915(dev); 8388 u32 tmp; 8389 8390 tmp = I915_READ(PIPESRC(crtc->pipe)); 8391 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 8392 pipe_config->pipe_src_w = ((tmp >> 
16) & 0xffff) + 1; 8393 8394 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h; 8395 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w; 8396 } 8397 8398 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 8399 struct intel_crtc_state *pipe_config) 8400 { 8401 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay; 8402 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal; 8403 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start; 8404 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end; 8405 8406 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay; 8407 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal; 8408 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start; 8409 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; 8410 8411 mode->flags = pipe_config->base.adjusted_mode.flags; 8412 mode->type = DRM_MODE_TYPE_DRIVER; 8413 8414 mode->clock = pipe_config->base.adjusted_mode.crtc_clock; 8415 8416 mode->hsync = drm_mode_hsync(mode); 8417 mode->vrefresh = drm_mode_vrefresh(mode); 8418 drm_mode_set_name(mode); 8419 } 8420 8421 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 8422 { 8423 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8424 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8425 u32 pipeconf; 8426 8427 pipeconf = 0; 8428 8429 /* we keep both pipes enabled on 830 */ 8430 if (IS_I830(dev_priv)) 8431 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE; 8432 8433 if (crtc_state->double_wide) 8434 pipeconf |= PIPECONF_DOUBLE_WIDE; 8435 8436 /* only g4x and later have fancy bpc/dither controls */ 8437 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 8438 IS_CHERRYVIEW(dev_priv)) { 8439 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 8440 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 8441 pipeconf |= PIPECONF_DITHER_EN | 8442 PIPECONF_DITHER_TYPE_SP; 8443 8444 switch (crtc_state->pipe_bpp) { 8445 case 18: 8446 pipeconf |= PIPECONF_6BPC; 8447 break; 8448 case 24: 8449 pipeconf |= PIPECONF_8BPC; 8450 break; 8451 case 30: 8452 pipeconf |= PIPECONF_10BPC; 8453 break; 8454 default: 8455 /* Case prevented by intel_choose_pipe_bpp_dither. 
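 * Hitting it would mean an unvalidated pipe_bpp made it this far,
 * hence the BUG() below.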
*/ 8456 BUG(); 8457 } 8458 } 8459 8460 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 8461 if (INTEL_GEN(dev_priv) < 4 || 8462 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 8463 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 8464 else 8465 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 8466 } else { 8467 pipeconf |= PIPECONF_PROGRESSIVE; 8468 } 8469 8470 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 8471 crtc_state->limited_color_range) 8472 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 8473 8474 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 8475 8476 I915_WRITE(PIPECONF(crtc->pipe), pipeconf); 8477 POSTING_READ(PIPECONF(crtc->pipe)); 8478 } 8479 8480 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 8481 struct intel_crtc_state *crtc_state) 8482 { 8483 struct drm_device *dev = crtc->base.dev; 8484 struct drm_i915_private *dev_priv = to_i915(dev); 8485 const struct intel_limit *limit; 8486 int refclk = 48000; 8487 8488 memset(&crtc_state->dpll_hw_state, 0, 8489 sizeof(crtc_state->dpll_hw_state)); 8490 8491 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8492 if (intel_panel_use_ssc(dev_priv)) { 8493 refclk = dev_priv->vbt.lvds_ssc_freq; 8494 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8495 } 8496 8497 limit = &intel_limits_i8xx_lvds; 8498 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { 8499 limit = &intel_limits_i8xx_dvo; 8500 } else { 8501 limit = &intel_limits_i8xx_dac; 8502 } 8503 8504 if (!crtc_state->clock_set && 8505 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8506 refclk, NULL, &crtc_state->dpll)) { 8507 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8508 return -EINVAL; 8509 } 8510 8511 i8xx_compute_dpll(crtc, crtc_state, NULL); 8512 8513 return 0; 8514 } 8515 8516 static int g4x_crtc_compute_clock(struct intel_crtc *crtc, 8517 struct intel_crtc_state *crtc_state) 8518 { 8519 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8520 const struct intel_limit *limit; 8521 int refclk = 96000; 8522 8523 memset(&crtc_state->dpll_hw_state, 0, 8524 sizeof(crtc_state->dpll_hw_state)); 8525 8526 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8527 if (intel_panel_use_ssc(dev_priv)) { 8528 refclk = dev_priv->vbt.lvds_ssc_freq; 8529 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8530 } 8531 8532 if (intel_is_dual_link_lvds(dev_priv)) 8533 limit = &intel_limits_g4x_dual_channel_lvds; 8534 else 8535 limit = &intel_limits_g4x_single_channel_lvds; 8536 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || 8537 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 8538 limit = &intel_limits_g4x_hdmi; 8539 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { 8540 limit = &intel_limits_g4x_sdvo; 8541 } else { 8542 /* The option is for other outputs */ 8543 limit = &intel_limits_i9xx_sdvo; 8544 } 8545 8546 if (!crtc_state->clock_set && 8547 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8548 refclk, NULL, &crtc_state->dpll)) { 8549 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8550 return -EINVAL; 8551 } 8552 8553 i9xx_compute_dpll(crtc, crtc_state, NULL); 8554 8555 return 0; 8556 } 8557 8558 static int pnv_crtc_compute_clock(struct intel_crtc *crtc, 8559 struct intel_crtc_state *crtc_state) 8560 { 8561 struct drm_device *dev = crtc->base.dev; 8562 struct drm_i915_private *dev_priv = to_i915(dev); 8563 const struct intel_limit *limit; 8564 int refclk = 96000; 8565 8566 
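	/* 96 MHz is the default non-SSC reference; LVDS with SSC switches to
	 * the VBT-provided frequency below. */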
memset(&crtc_state->dpll_hw_state, 0, 8567 sizeof(crtc_state->dpll_hw_state)); 8568 8569 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8570 if (intel_panel_use_ssc(dev_priv)) { 8571 refclk = dev_priv->vbt.lvds_ssc_freq; 8572 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8573 } 8574 8575 limit = &intel_limits_pineview_lvds; 8576 } else { 8577 limit = &intel_limits_pineview_sdvo; 8578 } 8579 8580 if (!crtc_state->clock_set && 8581 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8582 refclk, NULL, &crtc_state->dpll)) { 8583 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8584 return -EINVAL; 8585 } 8586 8587 i9xx_compute_dpll(crtc, crtc_state, NULL); 8588 8589 return 0; 8590 } 8591 8592 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, 8593 struct intel_crtc_state *crtc_state) 8594 { 8595 struct drm_device *dev = crtc->base.dev; 8596 struct drm_i915_private *dev_priv = to_i915(dev); 8597 const struct intel_limit *limit; 8598 int refclk = 96000; 8599 8600 memset(&crtc_state->dpll_hw_state, 0, 8601 sizeof(crtc_state->dpll_hw_state)); 8602 8603 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8604 if (intel_panel_use_ssc(dev_priv)) { 8605 refclk = dev_priv->vbt.lvds_ssc_freq; 8606 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8607 } 8608 8609 limit = &intel_limits_i9xx_lvds; 8610 } else { 8611 limit = &intel_limits_i9xx_sdvo; 8612 } 8613 8614 if (!crtc_state->clock_set && 8615 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8616 refclk, NULL, &crtc_state->dpll)) { 8617 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8618 return -EINVAL; 8619 } 8620 8621 i9xx_compute_dpll(crtc, crtc_state, NULL); 8622 8623 return 0; 8624 } 8625 8626 static int chv_crtc_compute_clock(struct intel_crtc *crtc, 8627 struct intel_crtc_state *crtc_state) 8628 { 8629 int refclk = 100000; 8630 const struct intel_limit *limit = &intel_limits_chv; 8631 8632 memset(&crtc_state->dpll_hw_state, 0, 8633 sizeof(crtc_state->dpll_hw_state)); 8634 8635 if (!crtc_state->clock_set && 8636 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8637 refclk, NULL, &crtc_state->dpll)) { 8638 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8639 return -EINVAL; 8640 } 8641 8642 chv_compute_dpll(crtc, crtc_state); 8643 8644 return 0; 8645 } 8646 8647 static int vlv_crtc_compute_clock(struct intel_crtc *crtc, 8648 struct intel_crtc_state *crtc_state) 8649 { 8650 int refclk = 100000; 8651 const struct intel_limit *limit = &intel_limits_vlv; 8652 8653 memset(&crtc_state->dpll_hw_state, 0, 8654 sizeof(crtc_state->dpll_hw_state)); 8655 8656 if (!crtc_state->clock_set && 8657 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8658 refclk, NULL, &crtc_state->dpll)) { 8659 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8660 return -EINVAL; 8661 } 8662 8663 vlv_compute_dpll(crtc, crtc_state); 8664 8665 return 0; 8666 } 8667 8668 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 8669 { 8670 if (IS_I830(dev_priv)) 8671 return false; 8672 8673 return INTEL_GEN(dev_priv) >= 4 || 8674 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 8675 } 8676 8677 static void i9xx_get_pfit_config(struct intel_crtc *crtc, 8678 struct intel_crtc_state *pipe_config) 8679 { 8680 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8681 u32 tmp; 8682 8683 if (!i9xx_has_pfit(dev_priv)) 8684 return; 8685 8686 tmp = I915_READ(PFIT_CONTROL); 8687 if (!(tmp & PFIT_ENABLE)) 8688 return; 8689 8690 /* Check whether the pfit is 
attached to our pipe. */ 8691 if (INTEL_GEN(dev_priv) < 4) { 8692 if (crtc->pipe != PIPE_B) 8693 return; 8694 } else { 8695 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 8696 return; 8697 } 8698 8699 pipe_config->gmch_pfit.control = tmp; 8700 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 8701 } 8702 8703 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 8704 struct intel_crtc_state *pipe_config) 8705 { 8706 struct drm_device *dev = crtc->base.dev; 8707 struct drm_i915_private *dev_priv = to_i915(dev); 8708 enum pipe pipe = crtc->pipe; 8709 struct dpll clock; 8710 u32 mdiv; 8711 int refclk = 100000; 8712 8713 /* In case of DSI, DPLL will not be used */ 8714 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8715 return; 8716 8717 vlv_dpio_get(dev_priv); 8718 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 8719 vlv_dpio_put(dev_priv); 8720 8721 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 8722 clock.m2 = mdiv & DPIO_M2DIV_MASK; 8723 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 8724 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 8725 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 8726 8727 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 8728 } 8729 8730 static void 8731 i9xx_get_initial_plane_config(struct intel_crtc *crtc, 8732 struct intel_initial_plane_config *plane_config) 8733 { 8734 struct drm_device *dev = crtc->base.dev; 8735 struct drm_i915_private *dev_priv = to_i915(dev); 8736 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 8737 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 8738 enum pipe pipe; 8739 u32 val, base, offset; 8740 int fourcc, pixel_format; 8741 unsigned int aligned_height; 8742 struct drm_framebuffer *fb; 8743 struct intel_framebuffer *intel_fb; 8744 8745 if (!plane->get_hw_state(plane, &pipe)) 8746 return; 8747 8748 WARN_ON(pipe != crtc->pipe); 8749 8750 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 8751 if (!intel_fb) { 8752 DRM_DEBUG_KMS("failed to alloc fb\n"); 8753 return; 8754 } 8755 8756 fb = &intel_fb->base; 8757 8758 fb->dev = dev; 8759 8760 val = I915_READ(DSPCNTR(i9xx_plane)); 8761 8762 if (INTEL_GEN(dev_priv) >= 4) { 8763 if (val & DISPPLANE_TILED) { 8764 plane_config->tiling = I915_TILING_X; 8765 fb->modifier = I915_FORMAT_MOD_X_TILED; 8766 } 8767 8768 if (val & DISPPLANE_ROTATE_180) 8769 plane_config->rotation = DRM_MODE_ROTATE_180; 8770 } 8771 8772 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && 8773 val & DISPPLANE_MIRROR) 8774 plane_config->rotation |= DRM_MODE_REFLECT_X; 8775 8776 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 8777 fourcc = i9xx_format_to_fourcc(pixel_format); 8778 fb->format = drm_format_info(fourcc); 8779 8780 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 8781 offset = I915_READ(DSPOFFSET(i9xx_plane)); 8782 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; 8783 } else if (INTEL_GEN(dev_priv) >= 4) { 8784 if (plane_config->tiling) 8785 offset = I915_READ(DSPTILEOFF(i9xx_plane)); 8786 else 8787 offset = I915_READ(DSPLINOFF(i9xx_plane)); 8788 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; 8789 } else { 8790 base = I915_READ(DSPADDR(i9xx_plane)); 8791 } 8792 plane_config->base = base; 8793 8794 val = I915_READ(PIPESRC(pipe)); 8795 fb->width = ((val >> 16) & 0xfff) + 1; 8796 fb->height = ((val >> 0) & 0xfff) + 1; 8797 8798 val = I915_READ(DSPSTRIDE(i9xx_plane)); 8799 fb->pitches[0] = val & 0xffffffc0; 8800 8801 aligned_height = intel_fb_align_height(fb, 0, fb->height); 8802 8803 plane_config->size = fb->pitches[0] * aligned_height; 8804 
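	/* Note: this size is the footprint of the BIOS-programmed fb:
	 * stride times the tile-aligned height. */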
8805 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 8806 crtc->base.name, plane->base.name, fb->width, fb->height, 8807 fb->format->cpp[0] * 8, base, fb->pitches[0], 8808 plane_config->size); 8809 8810 plane_config->fb = intel_fb; 8811 } 8812 8813 static void chv_crtc_clock_get(struct intel_crtc *crtc, 8814 struct intel_crtc_state *pipe_config) 8815 { 8816 struct drm_device *dev = crtc->base.dev; 8817 struct drm_i915_private *dev_priv = to_i915(dev); 8818 enum pipe pipe = crtc->pipe; 8819 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8820 struct dpll clock; 8821 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 8822 int refclk = 100000; 8823 8824 /* In case of DSI, DPLL will not be used */ 8825 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8826 return; 8827 8828 vlv_dpio_get(dev_priv); 8829 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 8830 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 8831 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 8832 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 8833 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 8834 vlv_dpio_put(dev_priv); 8835 8836 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; 8837 clock.m2 = (pll_dw0 & 0xff) << 22; 8838 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 8839 clock.m2 |= pll_dw2 & 0x3fffff; 8840 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 8841 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 8842 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 8843 8844 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 8845 } 8846 8847 static enum intel_output_format 8848 bdw_get_pipemisc_output_format(struct intel_crtc *crtc) 8849 { 8850 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8851 u32 tmp; 8852 8853 tmp = I915_READ(PIPEMISC(crtc->pipe)); 8854 8855 if (tmp & PIPEMISC_YUV420_ENABLE) { 8856 /* We support 4:2:0 in full blend mode only */ 8857 WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0); 8858 8859 return INTEL_OUTPUT_FORMAT_YCBCR420; 8860 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { 8861 return INTEL_OUTPUT_FORMAT_YCBCR444; 8862 } else { 8863 return INTEL_OUTPUT_FORMAT_RGB; 8864 } 8865 } 8866 8867 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) 8868 { 8869 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8870 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 8871 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8872 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 8873 u32 tmp; 8874 8875 tmp = I915_READ(DSPCNTR(i9xx_plane)); 8876 8877 if (tmp & DISPPLANE_GAMMA_ENABLE) 8878 crtc_state->gamma_enable = true; 8879 8880 if (!HAS_GMCH(dev_priv) && 8881 tmp & DISPPLANE_PIPE_CSC_ENABLE) 8882 crtc_state->csc_enable = true; 8883 } 8884 8885 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 8886 struct intel_crtc_state *pipe_config) 8887 { 8888 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8889 enum intel_display_power_domain power_domain; 8890 intel_wakeref_t wakeref; 8891 u32 tmp; 8892 bool ret; 8893 8894 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 8895 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 8896 if (!wakeref) 8897 return false; 8898 8899 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 8900 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8901 pipe_config->shared_dpll = NULL; 8902 pipe_config->master_transcoder = 
INVALID_TRANSCODER; 8903 8904 ret = false; 8905 8906 tmp = I915_READ(PIPECONF(crtc->pipe)); 8907 if (!(tmp & PIPECONF_ENABLE)) 8908 goto out; 8909 8910 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 8911 IS_CHERRYVIEW(dev_priv)) { 8912 switch (tmp & PIPECONF_BPC_MASK) { 8913 case PIPECONF_6BPC: 8914 pipe_config->pipe_bpp = 18; 8915 break; 8916 case PIPECONF_8BPC: 8917 pipe_config->pipe_bpp = 24; 8918 break; 8919 case PIPECONF_10BPC: 8920 pipe_config->pipe_bpp = 30; 8921 break; 8922 default: 8923 break; 8924 } 8925 } 8926 8927 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 8928 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 8929 pipe_config->limited_color_range = true; 8930 8931 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >> 8932 PIPECONF_GAMMA_MODE_SHIFT; 8933 8934 if (IS_CHERRYVIEW(dev_priv)) 8935 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe)); 8936 8937 i9xx_get_pipe_color_config(pipe_config); 8938 intel_color_get_config(pipe_config); 8939 8940 if (INTEL_GEN(dev_priv) < 4) 8941 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 8942 8943 intel_get_pipe_timings(crtc, pipe_config); 8944 intel_get_pipe_src_size(crtc, pipe_config); 8945 8946 i9xx_get_pfit_config(crtc, pipe_config); 8947 8948 if (INTEL_GEN(dev_priv) >= 4) { 8949 /* No way to read it out on pipes B and C */ 8950 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 8951 tmp = dev_priv->chv_dpll_md[crtc->pipe]; 8952 else 8953 tmp = I915_READ(DPLL_MD(crtc->pipe)); 8954 pipe_config->pixel_multiplier = 8955 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 8956 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 8957 pipe_config->dpll_hw_state.dpll_md = tmp; 8958 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 8959 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 8960 tmp = I915_READ(DPLL(crtc->pipe)); 8961 pipe_config->pixel_multiplier = 8962 ((tmp & SDVO_MULTIPLIER_MASK) 8963 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 8964 } else { 8965 /* Note that on i915G/GM the pixel multiplier is in the sdvo 8966 * port and will be fixed up in the encoder->get_config 8967 * function. */ 8968 pipe_config->pixel_multiplier = 1; 8969 } 8970 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 8971 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 8972 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 8973 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 8974 } else { 8975 /* Mask out read-only status bits. */ 8976 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 8977 DPLL_PORTC_READY_MASK | 8978 DPLL_PORTB_READY_MASK); 8979 } 8980 8981 if (IS_CHERRYVIEW(dev_priv)) 8982 chv_crtc_clock_get(crtc, pipe_config); 8983 else if (IS_VALLEYVIEW(dev_priv)) 8984 vlv_crtc_clock_get(crtc, pipe_config); 8985 else 8986 i9xx_crtc_clock_get(crtc, pipe_config); 8987 8988 /* 8989 * Normally the dotclock is filled in by the encoder .get_config() 8990 * but in case the pipe is enabled w/o any ports we need a sane 8991 * default. 
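 * port_clock / pixel_multiplier below is roughly what an encoder's
 * .get_config() would have computed.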
8992 */ 8993 pipe_config->base.adjusted_mode.crtc_clock = 8994 pipe_config->port_clock / pipe_config->pixel_multiplier; 8995 8996 ret = true; 8997 8998 out: 8999 intel_display_power_put(dev_priv, power_domain, wakeref); 9000 9001 return ret; 9002 } 9003 9004 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) 9005 { 9006 struct intel_encoder *encoder; 9007 int i; 9008 u32 val, final; 9009 bool has_lvds = false; 9010 bool has_cpu_edp = false; 9011 bool has_panel = false; 9012 bool has_ck505 = false; 9013 bool can_ssc = false; 9014 bool using_ssc_source = false; 9015 9016 /* We need to take the global config into account */ 9017 for_each_intel_encoder(&dev_priv->drm, encoder) { 9018 switch (encoder->type) { 9019 case INTEL_OUTPUT_LVDS: 9020 has_panel = true; 9021 has_lvds = true; 9022 break; 9023 case INTEL_OUTPUT_EDP: 9024 has_panel = true; 9025 if (encoder->port == PORT_A) 9026 has_cpu_edp = true; 9027 break; 9028 default: 9029 break; 9030 } 9031 } 9032 9033 if (HAS_PCH_IBX(dev_priv)) { 9034 has_ck505 = dev_priv->vbt.display_clock_mode; 9035 can_ssc = has_ck505; 9036 } else { 9037 has_ck505 = false; 9038 can_ssc = true; 9039 } 9040 9041 /* Check if any DPLLs are using the SSC source */ 9042 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 9043 u32 temp = I915_READ(PCH_DPLL(i)); 9044 9045 if (!(temp & DPLL_VCO_ENABLE)) 9046 continue; 9047 9048 if ((temp & PLL_REF_INPUT_MASK) == 9049 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 9050 using_ssc_source = true; 9051 break; 9052 } 9053 } 9054 9055 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 9056 has_panel, has_lvds, has_ck505, using_ssc_source); 9057 9058 /* Ironlake: try to setup display ref clock before DPLL 9059 * enabling. This is only under driver's control after 9060 * PCH B stepping, previous chipset stepping should be 9061 * ignoring this setting. 9062 */ 9063 val = I915_READ(PCH_DREF_CONTROL); 9064 9065 /* As we must carefully and slowly disable/enable each source in turn, 9066 * compute the final state we want first and check if we need to 9067 * make any changes at all. 
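 * If the computed final value already matches the live register value
 * we return early without touching the hardware.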
9068 */ 9069 final = val; 9070 final &= ~DREF_NONSPREAD_SOURCE_MASK; 9071 if (has_ck505) 9072 final |= DREF_NONSPREAD_CK505_ENABLE; 9073 else 9074 final |= DREF_NONSPREAD_SOURCE_ENABLE; 9075 9076 final &= ~DREF_SSC_SOURCE_MASK; 9077 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9078 final &= ~DREF_SSC1_ENABLE; 9079 9080 if (has_panel) { 9081 final |= DREF_SSC_SOURCE_ENABLE; 9082 9083 if (intel_panel_use_ssc(dev_priv) && can_ssc) 9084 final |= DREF_SSC1_ENABLE; 9085 9086 if (has_cpu_edp) { 9087 if (intel_panel_use_ssc(dev_priv) && can_ssc) 9088 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 9089 else 9090 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 9091 } else 9092 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9093 } else if (using_ssc_source) { 9094 final |= DREF_SSC_SOURCE_ENABLE; 9095 final |= DREF_SSC1_ENABLE; 9096 } 9097 9098 if (final == val) 9099 return; 9100 9101 /* Always enable nonspread source */ 9102 val &= ~DREF_NONSPREAD_SOURCE_MASK; 9103 9104 if (has_ck505) 9105 val |= DREF_NONSPREAD_CK505_ENABLE; 9106 else 9107 val |= DREF_NONSPREAD_SOURCE_ENABLE; 9108 9109 if (has_panel) { 9110 val &= ~DREF_SSC_SOURCE_MASK; 9111 val |= DREF_SSC_SOURCE_ENABLE; 9112 9113 /* SSC must be turned on before enabling the CPU output */ 9114 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9115 DRM_DEBUG_KMS("Using SSC on panel\n"); 9116 val |= DREF_SSC1_ENABLE; 9117 } else 9118 val &= ~DREF_SSC1_ENABLE; 9119 9120 /* Get SSC going before enabling the outputs */ 9121 I915_WRITE(PCH_DREF_CONTROL, val); 9122 POSTING_READ(PCH_DREF_CONTROL); 9123 udelay(200); 9124 9125 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9126 9127 /* Enable CPU source on CPU attached eDP */ 9128 if (has_cpu_edp) { 9129 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9130 DRM_DEBUG_KMS("Using SSC on eDP\n"); 9131 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 9132 } else 9133 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 9134 } else 9135 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9136 9137 I915_WRITE(PCH_DREF_CONTROL, val); 9138 POSTING_READ(PCH_DREF_CONTROL); 9139 udelay(200); 9140 } else { 9141 DRM_DEBUG_KMS("Disabling CPU source output\n"); 9142 9143 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9144 9145 /* Turn off CPU output */ 9146 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9147 9148 I915_WRITE(PCH_DREF_CONTROL, val); 9149 POSTING_READ(PCH_DREF_CONTROL); 9150 udelay(200); 9151 9152 if (!using_ssc_source) { 9153 DRM_DEBUG_KMS("Disabling SSC source\n"); 9154 9155 /* Turn off the SSC source */ 9156 val &= ~DREF_SSC_SOURCE_MASK; 9157 val |= DREF_SSC_SOURCE_DISABLE; 9158 9159 /* Turn off SSC1 */ 9160 val &= ~DREF_SSC1_ENABLE; 9161 9162 I915_WRITE(PCH_DREF_CONTROL, val); 9163 POSTING_READ(PCH_DREF_CONTROL); 9164 udelay(200); 9165 } 9166 } 9167 9168 BUG_ON(val != final); 9169 } 9170 9171 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 9172 { 9173 u32 tmp; 9174 9175 tmp = I915_READ(SOUTH_CHICKEN2); 9176 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 9177 I915_WRITE(SOUTH_CHICKEN2, tmp); 9178 9179 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & 9180 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 9181 DRM_ERROR("FDI mPHY reset assert timeout\n"); 9182 9183 tmp = I915_READ(SOUTH_CHICKEN2); 9184 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 9185 I915_WRITE(SOUTH_CHICKEN2, tmp); 9186 9187 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & 9188 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 9189 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 9190 } 9191 9192 /* WaMPhyProgramming:hsw */ 9193 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 9194 { 9195 u32 tmp; 9196 9197 tmp = intel_sbi_read(dev_priv, 
0x8008, SBI_MPHY); 9198 tmp &= ~(0xFF << 24); 9199 tmp |= (0x12 << 24); 9200 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 9201 9202 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 9203 tmp |= (1 << 11); 9204 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 9205 9206 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 9207 tmp |= (1 << 11); 9208 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 9209 9210 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 9211 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9212 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 9213 9214 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 9215 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9216 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 9217 9218 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 9219 tmp &= ~(7 << 13); 9220 tmp |= (5 << 13); 9221 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 9222 9223 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 9224 tmp &= ~(7 << 13); 9225 tmp |= (5 << 13); 9226 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 9227 9228 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 9229 tmp &= ~0xFF; 9230 tmp |= 0x1C; 9231 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 9232 9233 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 9234 tmp &= ~0xFF; 9235 tmp |= 0x1C; 9236 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 9237 9238 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 9239 tmp &= ~(0xFF << 16); 9240 tmp |= (0x1C << 16); 9241 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 9242 9243 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 9244 tmp &= ~(0xFF << 16); 9245 tmp |= (0x1C << 16); 9246 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 9247 9248 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 9249 tmp |= (1 << 27); 9250 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 9251 9252 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 9253 tmp |= (1 << 27); 9254 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 9255 9256 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 9257 tmp &= ~(0xF << 28); 9258 tmp |= (4 << 28); 9259 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 9260 9261 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 9262 tmp &= ~(0xF << 28); 9263 tmp |= (4 << 28); 9264 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 9265 } 9266 9267 /* Implements 3 different sequences from BSpec chapter "Display iCLK 9268 * Programming" based on the parameters passed: 9269 * - Sequence to enable CLKOUT_DP 9270 * - Sequence to enable CLKOUT_DP without spread 9271 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 9272 */ 9273 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, 9274 bool with_spread, bool with_fdi) 9275 { 9276 u32 reg, tmp; 9277 9278 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 9279 with_spread = true; 9280 if (WARN(HAS_PCH_LPT_LP(dev_priv) && 9281 with_fdi, "LP PCH doesn't have FDI\n")) 9282 with_fdi = false; 9283 9284 mutex_lock(&dev_priv->sb_lock); 9285 9286 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9287 tmp &= ~SBI_SSCCTL_DISABLE; 9288 tmp |= SBI_SSCCTL_PATHALT; 9289 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9290 9291 udelay(24); 9292 9293 if (with_spread) { 9294 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9295 tmp &= ~SBI_SSCCTL_PATHALT; 9296 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9297 9298 if (with_fdi) { 9299 lpt_reset_fdi_mphy(dev_priv); 9300 lpt_program_fdi_mphy(dev_priv); 9301 } 9302 } 9303 9304 reg = HAS_PCH_LPT_LP(dev_priv) ? 
SBI_GEN0 : SBI_DBUFF0; 9305 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9306 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9307 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9308 9309 mutex_unlock(&dev_priv->sb_lock); 9310 } 9311 9312 /* Sequence to disable CLKOUT_DP */ 9313 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) 9314 { 9315 u32 reg, tmp; 9316 9317 mutex_lock(&dev_priv->sb_lock); 9318 9319 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 9320 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9321 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9322 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9323 9324 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9325 if (!(tmp & SBI_SSCCTL_DISABLE)) { 9326 if (!(tmp & SBI_SSCCTL_PATHALT)) { 9327 tmp |= SBI_SSCCTL_PATHALT; 9328 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9329 udelay(32); 9330 } 9331 tmp |= SBI_SSCCTL_DISABLE; 9332 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9333 } 9334 9335 mutex_unlock(&dev_priv->sb_lock); 9336 } 9337 9338 #define BEND_IDX(steps) ((50 + (steps)) / 5) 9339 9340 static const u16 sscdivintphase[] = { 9341 [BEND_IDX( 50)] = 0x3B23, 9342 [BEND_IDX( 45)] = 0x3B23, 9343 [BEND_IDX( 40)] = 0x3C23, 9344 [BEND_IDX( 35)] = 0x3C23, 9345 [BEND_IDX( 30)] = 0x3D23, 9346 [BEND_IDX( 25)] = 0x3D23, 9347 [BEND_IDX( 20)] = 0x3E23, 9348 [BEND_IDX( 15)] = 0x3E23, 9349 [BEND_IDX( 10)] = 0x3F23, 9350 [BEND_IDX( 5)] = 0x3F23, 9351 [BEND_IDX( 0)] = 0x0025, 9352 [BEND_IDX( -5)] = 0x0025, 9353 [BEND_IDX(-10)] = 0x0125, 9354 [BEND_IDX(-15)] = 0x0125, 9355 [BEND_IDX(-20)] = 0x0225, 9356 [BEND_IDX(-25)] = 0x0225, 9357 [BEND_IDX(-30)] = 0x0325, 9358 [BEND_IDX(-35)] = 0x0325, 9359 [BEND_IDX(-40)] = 0x0425, 9360 [BEND_IDX(-45)] = 0x0425, 9361 [BEND_IDX(-50)] = 0x0525, 9362 }; 9363 9364 /* 9365 * Bend CLKOUT_DP 9366 * steps -50 to 50 inclusive, in steps of 5 9367 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) 9368 * change in clock period = -(steps / 10) * 5.787 ps 9369 */ 9370 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) 9371 { 9372 u32 tmp; 9373 int idx = BEND_IDX(steps); 9374 9375 if (WARN_ON(steps % 5 != 0)) 9376 return; 9377 9378 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase))) 9379 return; 9380 9381 mutex_lock(&dev_priv->sb_lock); 9382 9383 if (steps % 10 != 0) 9384 tmp = 0xAAAAAAAB; 9385 else 9386 tmp = 0x00000000; 9387 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); 9388 9389 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); 9390 tmp &= 0xffff0000; 9391 tmp |= sscdivintphase[idx]; 9392 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); 9393 9394 mutex_unlock(&dev_priv->sb_lock); 9395 } 9396 9397 #undef BEND_IDX 9398 9399 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) 9400 { 9401 u32 fuse_strap = I915_READ(FUSE_STRAP); 9402 u32 ctl = I915_READ(SPLL_CTL); 9403 9404 if ((ctl & SPLL_PLL_ENABLE) == 0) 9405 return false; 9406 9407 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && 9408 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9409 return true; 9410 9411 if (IS_BROADWELL(dev_priv) && 9412 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) 9413 return true; 9414 9415 return false; 9416 } 9417 9418 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, 9419 enum intel_dpll_id id) 9420 { 9421 u32 fuse_strap = I915_READ(FUSE_STRAP); 9422 u32 ctl = I915_READ(WRPLL_CTL(id)); 9423 9424 if ((ctl & WRPLL_PLL_ENABLE) == 0) 9425 return false; 9426 9427 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) 9428 
return true; 9429 9430 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && 9431 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && 9432 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9433 return true; 9434 9435 return false; 9436 } 9437 9438 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) 9439 { 9440 struct intel_encoder *encoder; 9441 bool has_fdi = false; 9442 9443 for_each_intel_encoder(&dev_priv->drm, encoder) { 9444 switch (encoder->type) { 9445 case INTEL_OUTPUT_ANALOG: 9446 has_fdi = true; 9447 break; 9448 default: 9449 break; 9450 } 9451 } 9452 9453 /* 9454 * The BIOS may have decided to use the PCH SSC 9455 * reference so we must not disable it until the 9456 * relevant PLLs have stopped relying on it. We'll 9457 * just leave the PCH SSC reference enabled in case 9458 * any active PLL is using it. It will get disabled 9459 * after runtime suspend if we don't have FDI. 9460 * 9461 * TODO: Move the whole reference clock handling 9462 * to the modeset sequence proper so that we can 9463 * actually enable/disable/reconfigure these things 9464 * safely. To do that we need to introduce a real 9465 * clock hierarchy. That would also allow us to do 9466 * clock bending finally. 9467 */ 9468 dev_priv->pch_ssc_use = 0; 9469 9470 if (spll_uses_pch_ssc(dev_priv)) { 9471 DRM_DEBUG_KMS("SPLL using PCH SSC\n"); 9472 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); 9473 } 9474 9475 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { 9476 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n"); 9477 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); 9478 } 9479 9480 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { 9481 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n"); 9482 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); 9483 } 9484 9485 if (dev_priv->pch_ssc_use) 9486 return; 9487 9488 if (has_fdi) { 9489 lpt_bend_clkout_dp(dev_priv, 0); 9490 lpt_enable_clkout_dp(dev_priv, true, true); 9491 } else { 9492 lpt_disable_clkout_dp(dev_priv); 9493 } 9494 } 9495 9496 /* 9497 * Initialize reference clocks when the driver loads 9498 */ 9499 void intel_init_pch_refclk(struct drm_i915_private *dev_priv) 9500 { 9501 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 9502 ironlake_init_pch_refclk(dev_priv); 9503 else if (HAS_PCH_LPT(dev_priv)) 9504 lpt_init_pch_refclk(dev_priv); 9505 } 9506 9507 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) 9508 { 9509 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9510 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9511 enum pipe pipe = crtc->pipe; 9512 u32 val; 9513 9514 val = 0; 9515 9516 switch (crtc_state->pipe_bpp) { 9517 case 18: 9518 val |= PIPECONF_6BPC; 9519 break; 9520 case 24: 9521 val |= PIPECONF_8BPC; 9522 break; 9523 case 30: 9524 val |= PIPECONF_10BPC; 9525 break; 9526 case 36: 9527 val |= PIPECONF_12BPC; 9528 break; 9529 default: 9530 /* Case prevented by intel_choose_pipe_bpp_dither. */ 9531 BUG(); 9532 } 9533 9534 if (crtc_state->dither) 9535 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 9536 9537 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 9538 val |= PIPECONF_INTERLACED_ILK; 9539 else 9540 val |= PIPECONF_PROGRESSIVE; 9541 9542 /* 9543 * This would end up with an odd purple hue over 9544 * the entire display. Make sure we don't do it. 
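 * (PIPECONF_COLOR_RANGE_SELECT applies RGB-style limited range compression to the pipe output, which makes no sense for YCbCr output)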
9545 */ 9546 WARN_ON(crtc_state->limited_color_range && 9547 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 9548 9549 if (crtc_state->limited_color_range) 9550 val |= PIPECONF_COLOR_RANGE_SELECT; 9551 9552 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 9553 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709; 9554 9555 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 9556 9557 I915_WRITE(PIPECONF(pipe), val); 9558 POSTING_READ(PIPECONF(pipe)); 9559 } 9560 9561 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) 9562 { 9563 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9564 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9565 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 9566 u32 val = 0; 9567 9568 if (IS_HASWELL(dev_priv) && crtc_state->dither) 9569 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 9570 9571 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 9572 val |= PIPECONF_INTERLACED_ILK; 9573 else 9574 val |= PIPECONF_PROGRESSIVE; 9575 9576 if (IS_HASWELL(dev_priv) && 9577 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 9578 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; 9579 9580 I915_WRITE(PIPECONF(cpu_transcoder), val); 9581 POSTING_READ(PIPECONF(cpu_transcoder)); 9582 } 9583 9584 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) 9585 { 9586 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9587 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9588 u32 val = 0; 9589 9590 switch (crtc_state->pipe_bpp) { 9591 case 18: 9592 val |= PIPEMISC_DITHER_6_BPC; 9593 break; 9594 case 24: 9595 val |= PIPEMISC_DITHER_8_BPC; 9596 break; 9597 case 30: 9598 val |= PIPEMISC_DITHER_10_BPC; 9599 break; 9600 case 36: 9601 val |= PIPEMISC_DITHER_12_BPC; 9602 break; 9603 default: 9604 MISSING_CASE(crtc_state->pipe_bpp); 9605 break; 9606 } 9607 9608 if (crtc_state->dither) 9609 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 9610 9611 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 9612 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 9613 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; 9614 9615 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 9616 val |= PIPEMISC_YUV420_ENABLE | 9617 PIPEMISC_YUV420_MODE_FULL_BLEND; 9618 9619 if (INTEL_GEN(dev_priv) >= 11 && 9620 (crtc_state->active_planes & ~(icl_hdr_plane_mask() | 9621 BIT(PLANE_CURSOR))) == 0) 9622 val |= PIPEMISC_HDR_MODE_PRECISION; 9623 9624 I915_WRITE(PIPEMISC(crtc->pipe), val); 9625 } 9626 9627 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) 9628 { 9629 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9630 u32 tmp; 9631 9632 tmp = I915_READ(PIPEMISC(crtc->pipe)); 9633 9634 switch (tmp & PIPEMISC_DITHER_BPC_MASK) { 9635 case PIPEMISC_DITHER_6_BPC: 9636 return 18; 9637 case PIPEMISC_DITHER_8_BPC: 9638 return 24; 9639 case PIPEMISC_DITHER_10_BPC: 9640 return 30; 9641 case PIPEMISC_DITHER_12_BPC: 9642 return 36; 9643 default: 9644 MISSING_CASE(tmp); 9645 return 0; 9646 } 9647 } 9648 9649 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 9650 { 9651 /* 9652 * Account for spread spectrum to avoid 9653 * oversubscribing the link. Max center spread 9654 * is 2.5%; use 5% for safety's sake. 
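 * For example (illustrative numbers only): a 148500 kHz mode at 24 bpp gives bps = 148500 * 24 * 21 / 20 = 3742200, and a 270000 kHz FDI link carries link_bw * 8 = 2160000 per lane, so DIV_ROUND_UP() below yields 2 lanes.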
9655 */ 9656 u32 bps = target_clock * bpp * 21 / 20; 9657 return DIV_ROUND_UP(bps, link_bw * 8); 9658 } 9659 9660 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) 9661 { 9662 return i9xx_dpll_compute_m(dpll) < factor * dpll->n; 9663 } 9664 9665 static void ironlake_compute_dpll(struct intel_crtc *crtc, 9666 struct intel_crtc_state *crtc_state, 9667 struct dpll *reduced_clock) 9668 { 9669 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9670 u32 dpll, fp, fp2; 9671 int factor; 9672 9673 /* Enable autotuning of the PLL clock (if permissible) */ 9674 factor = 21; 9675 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9676 if ((intel_panel_use_ssc(dev_priv) && 9677 dev_priv->vbt.lvds_ssc_freq == 100000) || 9678 (HAS_PCH_IBX(dev_priv) && 9679 intel_is_dual_link_lvds(dev_priv))) 9680 factor = 25; 9681 } else if (crtc_state->sdvo_tv_clock) { 9682 factor = 20; 9683 } 9684 9685 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 9686 9687 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor)) 9688 fp |= FP_CB_TUNE; 9689 9690 if (reduced_clock) { 9691 fp2 = i9xx_dpll_compute_fp(reduced_clock); 9692 9693 if (reduced_clock->m < factor * reduced_clock->n) 9694 fp2 |= FP_CB_TUNE; 9695 } else { 9696 fp2 = fp; 9697 } 9698 9699 dpll = 0; 9700 9701 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 9702 dpll |= DPLLB_MODE_LVDS; 9703 else 9704 dpll |= DPLLB_MODE_DAC_SERIAL; 9705 9706 dpll |= (crtc_state->pixel_multiplier - 1) 9707 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 9708 9709 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 9710 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 9711 dpll |= DPLL_SDVO_HIGH_SPEED; 9712 9713 if (intel_crtc_has_dp_encoder(crtc_state)) 9714 dpll |= DPLL_SDVO_HIGH_SPEED; 9715 9716 /* 9717 * The high speed IO clock is only really required for 9718 * SDVO/HDMI/DP, but we also enable it for CRT to make it 9719 * possible to share the DPLL between CRT and HDMI. Enabling 9720 * the clock needlessly does no real harm, except use up a 9721 * bit of power potentially. 9722 * 9723 * We'll limit this to IVB with 3 pipes, since it has only two 9724 * DPLLs and so DPLL sharing is the only way to get three pipes 9725 * driving PCH ports at the same time. On SNB we could do this, 9726 * and potentially avoid enabling the second DPLL, but it's not 9727 * clear if it's a win or loss power-wise. No point in doing 9728 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
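 * (IVB is the only PCH DPLL platform with three pipes, which is what the INTEL_NUM_PIPES() == 3 check below keys off)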
9729 */ 9730 if (INTEL_NUM_PIPES(dev_priv) == 3 && 9731 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) 9732 dpll |= DPLL_SDVO_HIGH_SPEED; 9733 9734 /* compute bitmask from p1 value */ 9735 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 9736 /* also FPA1 */ 9737 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 9738 9739 switch (crtc_state->dpll.p2) { 9740 case 5: 9741 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 9742 break; 9743 case 7: 9744 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 9745 break; 9746 case 10: 9747 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 9748 break; 9749 case 14: 9750 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 9751 break; 9752 } 9753 9754 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 9755 intel_panel_use_ssc(dev_priv)) 9756 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 9757 else 9758 dpll |= PLL_REF_INPUT_DREFCLK; 9759 9760 dpll |= DPLL_VCO_ENABLE; 9761 9762 crtc_state->dpll_hw_state.dpll = dpll; 9763 crtc_state->dpll_hw_state.fp0 = fp; 9764 crtc_state->dpll_hw_state.fp1 = fp2; 9765 } 9766 9767 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, 9768 struct intel_crtc_state *crtc_state) 9769 { 9770 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9771 struct intel_atomic_state *state = 9772 to_intel_atomic_state(crtc_state->base.state); 9773 const struct intel_limit *limit; 9774 int refclk = 120000; 9775 9776 memset(&crtc_state->dpll_hw_state, 0, 9777 sizeof(crtc_state->dpll_hw_state)); 9778 9779 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 9780 if (!crtc_state->has_pch_encoder) 9781 return 0; 9782 9783 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9784 if (intel_panel_use_ssc(dev_priv)) { 9785 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", 9786 dev_priv->vbt.lvds_ssc_freq); 9787 refclk = dev_priv->vbt.lvds_ssc_freq; 9788 } 9789 9790 if (intel_is_dual_link_lvds(dev_priv)) { 9791 if (refclk == 100000) 9792 limit = &intel_limits_ironlake_dual_lvds_100m; 9793 else 9794 limit = &intel_limits_ironlake_dual_lvds; 9795 } else { 9796 if (refclk == 100000) 9797 limit = &intel_limits_ironlake_single_lvds_100m; 9798 else 9799 limit = &intel_limits_ironlake_single_lvds; 9800 } 9801 } else { 9802 limit = &intel_limits_ironlake_dac; 9803 } 9804 9805 if (!crtc_state->clock_set && 9806 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9807 refclk, NULL, &crtc_state->dpll)) { 9808 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 9809 return -EINVAL; 9810 } 9811 9812 ironlake_compute_dpll(crtc, crtc_state, NULL); 9813 9814 if (!intel_reserve_shared_dplls(state, crtc, NULL)) { 9815 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", 9816 pipe_name(crtc->pipe)); 9817 return -EINVAL; 9818 } 9819 9820 return 0; 9821 } 9822 9823 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 9824 struct intel_link_m_n *m_n) 9825 { 9826 struct drm_device *dev = crtc->base.dev; 9827 struct drm_i915_private *dev_priv = to_i915(dev); 9828 enum pipe pipe = crtc->pipe; 9829 9830 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); 9831 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); 9832 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) 9833 & ~TU_SIZE_MASK; 9834 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); 9835 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) 9836 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9837 } 9838 9839 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 9840 enum transcoder transcoder, 9841 struct intel_link_m_n *m_n, 9842 struct 
intel_link_m_n *m2_n2) 9843 { 9844 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9845 enum pipe pipe = crtc->pipe; 9846 9847 if (INTEL_GEN(dev_priv) >= 5) { 9848 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); 9849 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); 9850 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 9851 & ~TU_SIZE_MASK; 9852 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 9853 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 9854 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9855 9856 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { 9857 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); 9858 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); 9859 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) 9860 & ~TU_SIZE_MASK; 9861 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); 9862 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) 9863 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9864 } 9865 } else { 9866 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); 9867 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); 9868 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) 9869 & ~TU_SIZE_MASK; 9870 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); 9871 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) 9872 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9873 } 9874 } 9875 9876 void intel_dp_get_m_n(struct intel_crtc *crtc, 9877 struct intel_crtc_state *pipe_config) 9878 { 9879 if (pipe_config->has_pch_encoder) 9880 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 9881 else 9882 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 9883 &pipe_config->dp_m_n, 9884 &pipe_config->dp_m2_n2); 9885 } 9886 9887 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 9888 struct intel_crtc_state *pipe_config) 9889 { 9890 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 9891 &pipe_config->fdi_m_n, NULL); 9892 } 9893 9894 static void skylake_get_pfit_config(struct intel_crtc *crtc, 9895 struct intel_crtc_state *pipe_config) 9896 { 9897 struct drm_device *dev = crtc->base.dev; 9898 struct drm_i915_private *dev_priv = to_i915(dev); 9899 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; 9900 u32 ps_ctrl = 0; 9901 int id = -1; 9902 int i; 9903 9904 /* find scaler attached to this pipe */ 9905 for (i = 0; i < crtc->num_scalers; i++) { 9906 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); 9907 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { 9908 id = i; 9909 pipe_config->pch_pfit.enabled = true; 9910 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); 9911 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i)); 9912 scaler_state->scalers[i].in_use = true; 9913 break; 9914 } 9915 } 9916 9917 scaler_state->scaler_id = id; 9918 if (id >= 0) { 9919 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); 9920 } else { 9921 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); 9922 } 9923 } 9924 9925 static void 9926 skylake_get_initial_plane_config(struct intel_crtc *crtc, 9927 struct intel_initial_plane_config *plane_config) 9928 { 9929 struct drm_device *dev = crtc->base.dev; 9930 struct drm_i915_private *dev_priv = to_i915(dev); 9931 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 9932 enum plane_id plane_id = plane->id; 9933 enum pipe pipe; 9934 u32 val, base, offset, stride_mult, tiling, alpha; 9935 int fourcc, pixel_format; 9936 unsigned int aligned_height; 9937 struct drm_framebuffer *fb; 9938 struct intel_framebuffer *intel_fb; 9939 9940 if 
(!plane->get_hw_state(plane, &pipe)) 9941 return; 9942 9943 WARN_ON(pipe != crtc->pipe); 9944 9945 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 9946 if (!intel_fb) { 9947 DRM_DEBUG_KMS("failed to alloc fb\n"); 9948 return; 9949 } 9950 9951 fb = &intel_fb->base; 9952 9953 fb->dev = dev; 9954 9955 val = I915_READ(PLANE_CTL(pipe, plane_id)); 9956 9957 if (INTEL_GEN(dev_priv) >= 11) 9958 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK; 9959 else 9960 pixel_format = val & PLANE_CTL_FORMAT_MASK; 9961 9962 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { 9963 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id)); 9964 alpha &= PLANE_COLOR_ALPHA_MASK; 9965 } else { 9966 alpha = val & PLANE_CTL_ALPHA_MASK; 9967 } 9968 9969 fourcc = skl_format_to_fourcc(pixel_format, 9970 val & PLANE_CTL_ORDER_RGBX, alpha); 9971 fb->format = drm_format_info(fourcc); 9972 9973 tiling = val & PLANE_CTL_TILED_MASK; 9974 switch (tiling) { 9975 case PLANE_CTL_TILED_LINEAR: 9976 fb->modifier = DRM_FORMAT_MOD_LINEAR; 9977 break; 9978 case PLANE_CTL_TILED_X: 9979 plane_config->tiling = I915_TILING_X; 9980 fb->modifier = I915_FORMAT_MOD_X_TILED; 9981 break; 9982 case PLANE_CTL_TILED_Y: 9983 plane_config->tiling = I915_TILING_Y; 9984 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) 9985 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS; 9986 else 9987 fb->modifier = I915_FORMAT_MOD_Y_TILED; 9988 break; 9989 case PLANE_CTL_TILED_YF: 9990 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) 9991 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS; 9992 else 9993 fb->modifier = I915_FORMAT_MOD_Yf_TILED; 9994 break; 9995 default: 9996 MISSING_CASE(tiling); 9997 goto error; 9998 } 9999 10000 /* 10001 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr 10002 * while i915 HW rotation is clockwise, that's why the values are swapped here.
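 * (e.g. PLANE_CTL_ROTATE_90 read from the hardware is reported as DRM_MODE_ROTATE_270 below, and vice versa)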
10003 */ 10004 switch (val & PLANE_CTL_ROTATE_MASK) { 10005 case PLANE_CTL_ROTATE_0: 10006 plane_config->rotation = DRM_MODE_ROTATE_0; 10007 break; 10008 case PLANE_CTL_ROTATE_90: 10009 plane_config->rotation = DRM_MODE_ROTATE_270; 10010 break; 10011 case PLANE_CTL_ROTATE_180: 10012 plane_config->rotation = DRM_MODE_ROTATE_180; 10013 break; 10014 case PLANE_CTL_ROTATE_270: 10015 plane_config->rotation = DRM_MODE_ROTATE_90; 10016 break; 10017 } 10018 10019 if (INTEL_GEN(dev_priv) >= 10 && 10020 val & PLANE_CTL_FLIP_HORIZONTAL) 10021 plane_config->rotation |= DRM_MODE_REFLECT_X; 10022 10023 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000; 10024 plane_config->base = base; 10025 10026 offset = I915_READ(PLANE_OFFSET(pipe, plane_id)); 10027 10028 val = I915_READ(PLANE_SIZE(pipe, plane_id)); 10029 fb->height = ((val >> 16) & 0xffff) + 1; 10030 fb->width = ((val >> 0) & 0xffff) + 1; 10031 10032 val = I915_READ(PLANE_STRIDE(pipe, plane_id)); 10033 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); 10034 fb->pitches[0] = (val & 0x3ff) * stride_mult; 10035 10036 aligned_height = intel_fb_align_height(fb, 0, fb->height); 10037 10038 plane_config->size = fb->pitches[0] * aligned_height; 10039 10040 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 10041 crtc->base.name, plane->base.name, fb->width, fb->height, 10042 fb->format->cpp[0] * 8, base, fb->pitches[0], 10043 plane_config->size); 10044 10045 plane_config->fb = intel_fb; 10046 return; 10047 10048 error: 10049 kfree(intel_fb); 10050 } 10051 10052 static void ironlake_get_pfit_config(struct intel_crtc *crtc, 10053 struct intel_crtc_state *pipe_config) 10054 { 10055 struct drm_device *dev = crtc->base.dev; 10056 struct drm_i915_private *dev_priv = to_i915(dev); 10057 u32 tmp; 10058 10059 tmp = I915_READ(PF_CTL(crtc->pipe)); 10060 10061 if (tmp & PF_ENABLE) { 10062 pipe_config->pch_pfit.enabled = true; 10063 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); 10064 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); 10065 10066 /* We currently do not free assignments of panel fitters on 10067 * ivb/hsw (since we don't use the higher upscaling modes which 10068 * differentiate them), so just WARN about this case for now.
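 * (i.e. the WARN below fires if the fitter read out here is assigned to some other pipe)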
*/ 10069 if (IS_GEN(dev_priv, 7)) { 10070 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != 10071 PF_PIPE_SEL_IVB(crtc->pipe)); 10072 } 10073 } 10074 } 10075 10076 static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 10077 struct intel_crtc_state *pipe_config) 10078 { 10079 struct drm_device *dev = crtc->base.dev; 10080 struct drm_i915_private *dev_priv = to_i915(dev); 10081 enum intel_display_power_domain power_domain; 10082 intel_wakeref_t wakeref; 10083 u32 tmp; 10084 bool ret; 10085 10086 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 10087 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 10088 if (!wakeref) 10089 return false; 10090 10091 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10092 pipe_config->shared_dpll = NULL; 10093 pipe_config->master_transcoder = INVALID_TRANSCODER; 10094 10095 ret = false; 10096 tmp = I915_READ(PIPECONF(crtc->pipe)); 10097 if (!(tmp & PIPECONF_ENABLE)) 10098 goto out; 10099 10100 switch (tmp & PIPECONF_BPC_MASK) { 10101 case PIPECONF_6BPC: 10102 pipe_config->pipe_bpp = 18; 10103 break; 10104 case PIPECONF_8BPC: 10105 pipe_config->pipe_bpp = 24; 10106 break; 10107 case PIPECONF_10BPC: 10108 pipe_config->pipe_bpp = 30; 10109 break; 10110 case PIPECONF_12BPC: 10111 pipe_config->pipe_bpp = 36; 10112 break; 10113 default: 10114 break; 10115 } 10116 10117 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 10118 pipe_config->limited_color_range = true; 10119 10120 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) { 10121 case PIPECONF_OUTPUT_COLORSPACE_YUV601: 10122 case PIPECONF_OUTPUT_COLORSPACE_YUV709: 10123 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 10124 break; 10125 default: 10126 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 10127 break; 10128 } 10129 10130 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> 10131 PIPECONF_GAMMA_MODE_SHIFT; 10132 10133 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); 10134 10135 i9xx_get_pipe_color_config(pipe_config); 10136 intel_color_get_config(pipe_config); 10137 10138 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 10139 struct intel_shared_dpll *pll; 10140 enum intel_dpll_id pll_id; 10141 10142 pipe_config->has_pch_encoder = true; 10143 10144 tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); 10145 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 10146 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10147 10148 ironlake_get_fdi_m_n_config(crtc, pipe_config); 10149 10150 if (HAS_PCH_IBX(dev_priv)) { 10151 /* 10152 * The pipe->pch transcoder and pch transcoder->pll 10153 * mapping is fixed. 
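 * (pipe A always uses PCH DPLL A, pipe B uses PCH DPLL B, hence the direct pipe-to-pll_id cast below)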
10154 */ 10155 pll_id = (enum intel_dpll_id) crtc->pipe; 10156 } else { 10157 tmp = I915_READ(PCH_DPLL_SEL); 10158 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 10159 pll_id = DPLL_ID_PCH_PLL_B; 10160 else 10161 pll_id = DPLL_ID_PCH_PLL_A; 10162 } 10163 10164 pipe_config->shared_dpll = 10165 intel_get_shared_dpll_by_id(dev_priv, pll_id); 10166 pll = pipe_config->shared_dpll; 10167 10168 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll, 10169 &pipe_config->dpll_hw_state)); 10170 10171 tmp = pipe_config->dpll_hw_state.dpll; 10172 pipe_config->pixel_multiplier = 10173 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 10174 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 10175 10176 ironlake_pch_clock_get(crtc, pipe_config); 10177 } else { 10178 pipe_config->pixel_multiplier = 1; 10179 } 10180 10181 intel_get_pipe_timings(crtc, pipe_config); 10182 intel_get_pipe_src_size(crtc, pipe_config); 10183 10184 ironlake_get_pfit_config(crtc, pipe_config); 10185 10186 ret = true; 10187 10188 out: 10189 intel_display_power_put(dev_priv, power_domain, wakeref); 10190 10191 return ret; 10192 } 10193 static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 10194 struct intel_crtc_state *crtc_state) 10195 { 10196 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10197 struct intel_atomic_state *state = 10198 to_intel_atomic_state(crtc_state->base.state); 10199 10200 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) || 10201 INTEL_GEN(dev_priv) >= 11) { 10202 struct intel_encoder *encoder = 10203 intel_get_crtc_new_encoder(state, crtc_state); 10204 10205 if (!intel_reserve_shared_dplls(state, crtc, encoder)) { 10206 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", 10207 pipe_name(crtc->pipe)); 10208 return -EINVAL; 10209 } 10210 } 10211 10212 return 0; 10213 } 10214 10215 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv, 10216 enum port port, 10217 struct intel_crtc_state *pipe_config) 10218 { 10219 enum intel_dpll_id id; 10220 u32 temp; 10221 10222 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); 10223 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); 10224 10225 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2)) 10226 return; 10227 10228 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10229 } 10230 10231 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, 10232 enum port port, 10233 struct intel_crtc_state *pipe_config) 10234 { 10235 enum phy phy = intel_port_to_phy(dev_priv, port); 10236 enum icl_port_dpll_id port_dpll_id; 10237 enum intel_dpll_id id; 10238 u32 temp; 10239 10240 if (intel_phy_is_combo(dev_priv, phy)) { 10241 temp = I915_READ(ICL_DPCLKA_CFGCR0) & 10242 ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); 10243 id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); 10244 port_dpll_id = ICL_PORT_DPLL_DEFAULT; 10245 } else if (intel_phy_is_tc(dev_priv, phy)) { 10246 u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; 10247 10248 if (clk_sel == DDI_CLK_SEL_MG) { 10249 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, 10250 port)); 10251 port_dpll_id = ICL_PORT_DPLL_MG_PHY; 10252 } else { 10253 WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162); 10254 id = DPLL_ID_ICL_TBTPLL; 10255 port_dpll_id = ICL_PORT_DPLL_DEFAULT; 10256 } 10257 } else { 10258 WARN(1, "Invalid port %x\n", port); 10259 return; 10260 } 10261 10262 pipe_config->icl_port_dplls[port_dpll_id].pll = 10263 intel_get_shared_dpll_by_id(dev_priv, id); 10264 10265 icl_set_active_port_dpll(pipe_config, port_dpll_id); 10266 } 10267 10268 static void
bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 10269 enum port port, 10270 struct intel_crtc_state *pipe_config) 10271 { 10272 enum intel_dpll_id id; 10273 10274 switch (port) { 10275 case PORT_A: 10276 id = DPLL_ID_SKL_DPLL0; 10277 break; 10278 case PORT_B: 10279 id = DPLL_ID_SKL_DPLL1; 10280 break; 10281 case PORT_C: 10282 id = DPLL_ID_SKL_DPLL2; 10283 break; 10284 default: 10285 DRM_ERROR("Incorrect port type\n"); 10286 return; 10287 } 10288 10289 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10290 } 10291 10292 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, 10293 enum port port, 10294 struct intel_crtc_state *pipe_config) 10295 { 10296 enum intel_dpll_id id; 10297 u32 temp; 10298 10299 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 10300 id = temp >> (port * 3 + 1); 10301 10302 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3)) 10303 return; 10304 10305 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10306 } 10307 10308 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, 10309 enum port port, 10310 struct intel_crtc_state *pipe_config) 10311 { 10312 enum intel_dpll_id id; 10313 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); 10314 10315 switch (ddi_pll_sel) { 10316 case PORT_CLK_SEL_WRPLL1: 10317 id = DPLL_ID_WRPLL1; 10318 break; 10319 case PORT_CLK_SEL_WRPLL2: 10320 id = DPLL_ID_WRPLL2; 10321 break; 10322 case PORT_CLK_SEL_SPLL: 10323 id = DPLL_ID_SPLL; 10324 break; 10325 case PORT_CLK_SEL_LCPLL_810: 10326 id = DPLL_ID_LCPLL_810; 10327 break; 10328 case PORT_CLK_SEL_LCPLL_1350: 10329 id = DPLL_ID_LCPLL_1350; 10330 break; 10331 case PORT_CLK_SEL_LCPLL_2700: 10332 id = DPLL_ID_LCPLL_2700; 10333 break; 10334 default: 10335 MISSING_CASE(ddi_pll_sel); 10336 /* fall through */ 10337 case PORT_CLK_SEL_NONE: 10338 return; 10339 } 10340 10341 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10342 } 10343 10344 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 10345 struct intel_crtc_state *pipe_config, 10346 u64 *power_domain_mask, 10347 intel_wakeref_t *wakerefs) 10348 { 10349 struct drm_device *dev = crtc->base.dev; 10350 struct drm_i915_private *dev_priv = to_i915(dev); 10351 enum intel_display_power_domain power_domain; 10352 unsigned long panel_transcoder_mask = 0; 10353 unsigned long enabled_panel_transcoders = 0; 10354 enum transcoder panel_transcoder; 10355 intel_wakeref_t wf; 10356 u32 tmp; 10357 10358 if (INTEL_GEN(dev_priv) >= 11) 10359 panel_transcoder_mask |= 10360 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 10361 10362 if (HAS_TRANSCODER_EDP(dev_priv)) 10363 panel_transcoder_mask |= BIT(TRANSCODER_EDP); 10364 10365 /* 10366 * The pipe->transcoder mapping is fixed with the exception of the eDP 10367 * and DSI transcoders handled below. 10368 */ 10369 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10370 10371 /* 10372 * XXX: Do intel_display_power_get_if_enabled before reading this (for 10373 * consistency and less surprising code; it's in always on power). 10374 */ 10375 for_each_set_bit(panel_transcoder, 10376 &panel_transcoder_mask, 10377 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) { 10378 bool force_thru = false; 10379 enum pipe trans_pipe; 10380 10381 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder)); 10382 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 10383 continue; 10384 10385 /* 10386 * Log all enabled ones, only use the first one. 10387 * 10388 * FIXME: This won't work for two separate DSI displays. 
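 * (the continue below skips every enabled panel transcoder after the first, so the extra ones are logged but otherwise ignored)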
10389 */ 10390 enabled_panel_transcoders |= BIT(panel_transcoder); 10391 if (enabled_panel_transcoders != BIT(panel_transcoder)) 10392 continue; 10393 10394 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 10395 default: 10396 WARN(1, "unknown pipe linked to transcoder %s\n", 10397 transcoder_name(panel_transcoder)); 10398 /* fall through */ 10399 case TRANS_DDI_EDP_INPUT_A_ONOFF: 10400 force_thru = true; 10401 /* fall through */ 10402 case TRANS_DDI_EDP_INPUT_A_ON: 10403 trans_pipe = PIPE_A; 10404 break; 10405 case TRANS_DDI_EDP_INPUT_B_ONOFF: 10406 trans_pipe = PIPE_B; 10407 break; 10408 case TRANS_DDI_EDP_INPUT_C_ONOFF: 10409 trans_pipe = PIPE_C; 10410 break; 10411 } 10412 10413 if (trans_pipe == crtc->pipe) { 10414 pipe_config->cpu_transcoder = panel_transcoder; 10415 pipe_config->pch_pfit.force_thru = force_thru; 10416 } 10417 } 10418 10419 /* 10420 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1 10421 */ 10422 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && 10423 enabled_panel_transcoders != BIT(TRANSCODER_EDP)); 10424 10425 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 10426 WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); 10427 10428 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10429 if (!wf) 10430 return false; 10431 10432 wakerefs[power_domain] = wf; 10433 *power_domain_mask |= BIT_ULL(power_domain); 10434 10435 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10436 10437 return tmp & PIPECONF_ENABLE; 10438 } 10439 10440 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 10441 struct intel_crtc_state *pipe_config, 10442 u64 *power_domain_mask, 10443 intel_wakeref_t *wakerefs) 10444 { 10445 struct drm_device *dev = crtc->base.dev; 10446 struct drm_i915_private *dev_priv = to_i915(dev); 10447 enum intel_display_power_domain power_domain; 10448 enum transcoder cpu_transcoder; 10449 intel_wakeref_t wf; 10450 enum port port; 10451 u32 tmp; 10452 10453 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 10454 if (port == PORT_A) 10455 cpu_transcoder = TRANSCODER_DSI_A; 10456 else 10457 cpu_transcoder = TRANSCODER_DSI_C; 10458 10459 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 10460 WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); 10461 10462 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10463 if (!wf) 10464 continue; 10465 10466 wakerefs[power_domain] = wf; 10467 *power_domain_mask |= BIT_ULL(power_domain); 10468 10469 /* 10470 * The PLL needs to be enabled with a valid divider 10471 * configuration, otherwise accessing DSI registers will hang 10472 * the machine. See BSpec North Display Engine 10473 * registers/MIPI[BXT]. We can break out here early, since we 10474 * need the same DSI PLL to be enabled for both DSI ports. 
10475 */ 10476 if (!bxt_dsi_pll_is_enabled(dev_priv)) 10477 break; 10478 10479 /* XXX: this works for video mode only */ 10480 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port)); 10481 if (!(tmp & DPI_ENABLE)) 10482 continue; 10483 10484 tmp = I915_READ(MIPI_CTRL(port)); 10485 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 10486 continue; 10487 10488 pipe_config->cpu_transcoder = cpu_transcoder; 10489 break; 10490 } 10491 10492 return transcoder_is_dsi(pipe_config->cpu_transcoder); 10493 } 10494 10495 static void haswell_get_ddi_port_state(struct intel_crtc *crtc, 10496 struct intel_crtc_state *pipe_config) 10497 { 10498 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10499 struct intel_shared_dpll *pll; 10500 enum port port; 10501 u32 tmp; 10502 10503 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); 10504 10505 if (INTEL_GEN(dev_priv) >= 12) 10506 port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 10507 else 10508 port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 10509 10510 if (INTEL_GEN(dev_priv) >= 11) 10511 icelake_get_ddi_pll(dev_priv, port, pipe_config); 10512 else if (IS_CANNONLAKE(dev_priv)) 10513 cannonlake_get_ddi_pll(dev_priv, port, pipe_config); 10514 else if (IS_GEN9_BC(dev_priv)) 10515 skylake_get_ddi_pll(dev_priv, port, pipe_config); 10516 else if (IS_GEN9_LP(dev_priv)) 10517 bxt_get_ddi_pll(dev_priv, port, pipe_config); 10518 else 10519 haswell_get_ddi_pll(dev_priv, port, pipe_config); 10520 10521 pll = pipe_config->shared_dpll; 10522 if (pll) { 10523 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll, 10524 &pipe_config->dpll_hw_state)); 10525 } 10526 10527 /* 10528 * Haswell has only FDI/PCH transcoder A, which is connected to 10529 * DDI E. So just check whether this pipe is wired to DDI E and whether 10530 * the PCH transcoder is on.
10531 */ 10532 if (INTEL_GEN(dev_priv) < 9 && 10533 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 10534 pipe_config->has_pch_encoder = true; 10535 10536 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 10537 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 10538 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10539 10540 ironlake_get_fdi_m_n_config(crtc, pipe_config); 10541 } 10542 } 10543 10544 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv, 10545 enum transcoder cpu_transcoder) 10546 { 10547 u32 trans_port_sync, master_select; 10548 10549 trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder)); 10550 10551 if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0) 10552 return INVALID_TRANSCODER; 10553 10554 master_select = trans_port_sync & 10555 PORT_SYNC_MODE_MASTER_SELECT_MASK; 10556 if (master_select == 0) 10557 return TRANSCODER_EDP; 10558 else 10559 return master_select - 1; 10560 } 10561 10562 static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) 10563 { 10564 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 10565 u32 transcoders; 10566 enum transcoder cpu_transcoder; 10567 10568 crtc_state->master_transcoder = transcoder_master_readout(dev_priv, 10569 crtc_state->cpu_transcoder); 10570 10571 transcoders = BIT(TRANSCODER_A) | 10572 BIT(TRANSCODER_B) | 10573 BIT(TRANSCODER_C) | 10574 BIT(TRANSCODER_D); 10575 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { 10576 enum intel_display_power_domain power_domain; 10577 intel_wakeref_t trans_wakeref; 10578 10579 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 10580 trans_wakeref = intel_display_power_get_if_enabled(dev_priv, 10581 power_domain); 10582 10583 if (!trans_wakeref) 10584 continue; 10585 10586 if (transcoder_master_readout(dev_priv, cpu_transcoder) == 10587 crtc_state->cpu_transcoder) 10588 crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder); 10589 10590 intel_display_power_put(dev_priv, power_domain, trans_wakeref); 10591 } 10592 10593 WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER && 10594 crtc_state->sync_mode_slaves_mask); 10595 } 10596 10597 static bool haswell_get_pipe_config(struct intel_crtc *crtc, 10598 struct intel_crtc_state *pipe_config) 10599 { 10600 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10601 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf; 10602 enum intel_display_power_domain power_domain; 10603 u64 power_domain_mask; 10604 bool active; 10605 10606 intel_crtc_init_scalers(crtc, pipe_config); 10607 10608 pipe_config->master_transcoder = INVALID_TRANSCODER; 10609 10610 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 10611 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10612 if (!wf) 10613 return false; 10614 10615 wakerefs[power_domain] = wf; 10616 power_domain_mask = BIT_ULL(power_domain); 10617 10618 pipe_config->shared_dpll = NULL; 10619 10620 active = hsw_get_transcoder_state(crtc, pipe_config, 10621 &power_domain_mask, wakerefs); 10622 10623 if (IS_GEN9_LP(dev_priv) && 10624 bxt_get_dsi_transcoder_state(crtc, pipe_config, 10625 &power_domain_mask, wakerefs)) { 10626 WARN_ON(active); 10627 active = true; 10628 } 10629 10630 if (!active) 10631 goto out; 10632 10633 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 10634 INTEL_GEN(dev_priv) >= 11) { 10635 haswell_get_ddi_port_state(crtc, pipe_config); 10636 intel_get_pipe_timings(crtc, pipe_config); 10637 } 10638 10639 intel_get_pipe_src_size(crtc, pipe_config); 10640 10641 if 
(IS_HASWELL(dev_priv)) { 10642 u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10643 10644 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) 10645 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 10646 else 10647 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 10648 } else { 10649 pipe_config->output_format = 10650 bdw_get_pipemisc_output_format(crtc); 10651 10652 /* 10653 * Currently there is no interface defined to 10654 * check the user's preference between RGB/YCBCR444 10655 * or YCBCR420. So the only possible case for 10656 * YCBCR444 usage is driving YCBCR420 output 10657 * with an LSPCON, where the pipe is configured for 10658 * YCBCR444 output and the LSPCON takes care of 10659 * downsampling it. 10660 */ 10661 pipe_config->lspcon_downsampling = 10662 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444; 10663 } 10664 10665 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)); 10666 10667 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); 10668 10669 if (INTEL_GEN(dev_priv) >= 9) { 10670 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe)); 10671 10672 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) 10673 pipe_config->gamma_enable = true; 10674 10675 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) 10676 pipe_config->csc_enable = true; 10677 } else { 10678 i9xx_get_pipe_color_config(pipe_config); 10679 } 10680 10681 intel_color_get_config(pipe_config); 10682 10683 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 10684 WARN_ON(power_domain_mask & BIT_ULL(power_domain)); 10685 10686 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10687 if (wf) { 10688 wakerefs[power_domain] = wf; 10689 power_domain_mask |= BIT_ULL(power_domain); 10690 10691 if (INTEL_GEN(dev_priv) >= 9) 10692 skylake_get_pfit_config(crtc, pipe_config); 10693 else 10694 ironlake_get_pfit_config(crtc, pipe_config); 10695 } 10696 10697 if (hsw_crtc_supports_ips(crtc)) { 10698 if (IS_HASWELL(dev_priv)) 10699 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE; 10700 else { 10701 /* 10702 * We cannot read out the IPS state on Broadwell; set it to 10703 * true so we can set it to a defined state on the first 10704 * commit.
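 * (for the same reason the state checker elsewhere treats ips_enabled as an incomplete readout rather than a hard mismatch)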
10705 */ 10706 pipe_config->ips_enabled = true; 10707 } 10708 } 10709 10710 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 10711 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 10712 pipe_config->pixel_multiplier = 10713 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 10714 } else { 10715 pipe_config->pixel_multiplier = 1; 10716 } 10717 10718 if (INTEL_GEN(dev_priv) >= 11 && 10719 !transcoder_is_dsi(pipe_config->cpu_transcoder)) 10720 icelake_get_trans_port_sync_config(pipe_config); 10721 10722 out: 10723 for_each_power_domain(power_domain, power_domain_mask) 10724 intel_display_power_put(dev_priv, 10725 power_domain, wakerefs[power_domain]); 10726 10727 return active; 10728 } 10729 10730 static u32 intel_cursor_base(const struct intel_plane_state *plane_state) 10731 { 10732 struct drm_i915_private *dev_priv = 10733 to_i915(plane_state->base.plane->dev); 10734 const struct drm_framebuffer *fb = plane_state->base.fb; 10735 const struct drm_i915_gem_object *obj = intel_fb_obj(fb); 10736 u32 base; 10737 10738 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) 10739 base = obj->phys_handle->busaddr; 10740 else 10741 base = intel_plane_ggtt_offset(plane_state); 10742 10743 return base + plane_state->color_plane[0].offset; 10744 } 10745 10746 static u32 intel_cursor_position(const struct intel_plane_state *plane_state) 10747 { 10748 int x = plane_state->base.dst.x1; 10749 int y = plane_state->base.dst.y1; 10750 u32 pos = 0; 10751 10752 if (x < 0) { 10753 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 10754 x = -x; 10755 } 10756 pos |= x << CURSOR_X_SHIFT; 10757 10758 if (y < 0) { 10759 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 10760 y = -y; 10761 } 10762 pos |= y << CURSOR_Y_SHIFT; 10763 10764 return pos; 10765 } 10766 10767 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) 10768 { 10769 const struct drm_mode_config *config = 10770 &plane_state->base.plane->dev->mode_config; 10771 int width = drm_rect_width(&plane_state->base.dst); 10772 int height = drm_rect_height(&plane_state->base.dst); 10773 10774 return width > 0 && width <= config->cursor_width && 10775 height > 0 && height <= config->cursor_height; 10776 } 10777 10778 static int intel_cursor_check_surface(struct intel_plane_state *plane_state) 10779 { 10780 struct drm_i915_private *dev_priv = 10781 to_i915(plane_state->base.plane->dev); 10782 unsigned int rotation = plane_state->base.rotation; 10783 int src_x, src_y; 10784 u32 offset; 10785 int ret; 10786 10787 ret = intel_plane_compute_gtt(plane_state); 10788 if (ret) 10789 return ret; 10790 10791 if (!plane_state->base.visible) 10792 return 0; 10793 10794 src_x = plane_state->base.src.x1 >> 16; 10795 src_y = plane_state->base.src.y1 >> 16; 10796 10797 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 10798 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 10799 plane_state, 0); 10800 10801 if (src_x != 0 || src_y != 0) { 10802 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n"); 10803 return -EINVAL; 10804 } 10805 10806 /* 10807 * Put the final coordinates back so that the src 10808 * coordinate checks will see the right values. 
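 * (intel_plane_compute_aligned_offset() above may have adjusted src_x/src_y while folding them into the surface offset)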
10809 */ 10810 drm_rect_translate_to(&plane_state->base.src, 10811 src_x << 16, src_y << 16); 10812 10813 /* ILK+ do this automagically in hardware */ 10814 if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) { 10815 const struct drm_framebuffer *fb = plane_state->base.fb; 10816 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 10817 int src_h = drm_rect_height(&plane_state->base.src) >> 16; 10818 10819 offset += (src_h * src_w - 1) * fb->format->cpp[0]; 10820 } 10821 10822 plane_state->color_plane[0].offset = offset; 10823 plane_state->color_plane[0].x = src_x; 10824 plane_state->color_plane[0].y = src_y; 10825 10826 return 0; 10827 } 10828 10829 static int intel_check_cursor(struct intel_crtc_state *crtc_state, 10830 struct intel_plane_state *plane_state) 10831 { 10832 const struct drm_framebuffer *fb = plane_state->base.fb; 10833 int ret; 10834 10835 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { 10836 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 10837 return -EINVAL; 10838 } 10839 10840 ret = drm_atomic_helper_check_plane_state(&plane_state->base, 10841 &crtc_state->base, 10842 DRM_PLANE_HELPER_NO_SCALING, 10843 DRM_PLANE_HELPER_NO_SCALING, 10844 true, true); 10845 if (ret) 10846 return ret; 10847 10848 /* Use the unclipped src/dst rectangles, which we program to hw */ 10849 plane_state->base.src = drm_plane_state_src(&plane_state->base); 10850 plane_state->base.dst = drm_plane_state_dest(&plane_state->base); 10851 10852 ret = intel_cursor_check_surface(plane_state); 10853 if (ret) 10854 return ret; 10855 10856 if (!plane_state->base.visible) 10857 return 0; 10858 10859 ret = intel_plane_check_src_coordinates(plane_state); 10860 if (ret) 10861 return ret; 10862 10863 return 0; 10864 } 10865 10866 static unsigned int 10867 i845_cursor_max_stride(struct intel_plane *plane, 10868 u32 pixel_format, u64 modifier, 10869 unsigned int rotation) 10870 { 10871 return 2048; 10872 } 10873 10874 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 10875 { 10876 u32 cntl = 0; 10877 10878 if (crtc_state->gamma_enable) 10879 cntl |= CURSOR_GAMMA_ENABLE; 10880 10881 return cntl; 10882 } 10883 10884 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 10885 const struct intel_plane_state *plane_state) 10886 { 10887 return CURSOR_ENABLE | 10888 CURSOR_FORMAT_ARGB | 10889 CURSOR_STRIDE(plane_state->color_plane[0].stride); 10890 } 10891 10892 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) 10893 { 10894 int width = drm_rect_width(&plane_state->base.dst); 10895 10896 /* 10897 * 845g/865g are only limited by the width of their cursors, 10898 * the height is arbitrary up to the precision of the register. 
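 * The width does, however, have to be a multiple of 64 pixels, hence the IS_ALIGNED() check below.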
10899 */ 10900 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); 10901 } 10902 10903 static int i845_check_cursor(struct intel_crtc_state *crtc_state, 10904 struct intel_plane_state *plane_state) 10905 { 10906 const struct drm_framebuffer *fb = plane_state->base.fb; 10907 int ret; 10908 10909 ret = intel_check_cursor(crtc_state, plane_state); 10910 if (ret) 10911 return ret; 10912 10913 /* if we want to turn off the cursor ignore width and height */ 10914 if (!fb) 10915 return 0; 10916 10917 /* Check for which cursor types we support */ 10918 if (!i845_cursor_size_ok(plane_state)) { 10919 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 10920 drm_rect_width(&plane_state->base.dst), 10921 drm_rect_height(&plane_state->base.dst)); 10922 return -EINVAL; 10923 } 10924 10925 WARN_ON(plane_state->base.visible && 10926 plane_state->color_plane[0].stride != fb->pitches[0]); 10927 10928 switch (fb->pitches[0]) { 10929 case 256: 10930 case 512: 10931 case 1024: 10932 case 2048: 10933 break; 10934 default: 10935 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n", 10936 fb->pitches[0]); 10937 return -EINVAL; 10938 } 10939 10940 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); 10941 10942 return 0; 10943 } 10944 10945 static void i845_update_cursor(struct intel_plane *plane, 10946 const struct intel_crtc_state *crtc_state, 10947 const struct intel_plane_state *plane_state) 10948 { 10949 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 10950 u32 cntl = 0, base = 0, pos = 0, size = 0; 10951 unsigned long irqflags; 10952 10953 if (plane_state && plane_state->base.visible) { 10954 unsigned int width = drm_rect_width(&plane_state->base.dst); 10955 unsigned int height = drm_rect_height(&plane_state->base.dst); 10956 10957 cntl = plane_state->ctl | 10958 i845_cursor_ctl_crtc(crtc_state); 10959 10960 size = (height << 12) | width; 10961 10962 base = intel_cursor_base(plane_state); 10963 pos = intel_cursor_position(plane_state); 10964 } 10965 10966 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 10967 10968 /* On these chipsets we can only modify the base/size/stride 10969 * whilst the cursor is disabled. 
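 * Hence the disable (CURCNTR = 0), reprogram, re-enable sequence below whenever anything other than the position changes.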
10970 */ 10971 if (plane->cursor.base != base || 10972 plane->cursor.size != size || 10973 plane->cursor.cntl != cntl) { 10974 I915_WRITE_FW(CURCNTR(PIPE_A), 0); 10975 I915_WRITE_FW(CURBASE(PIPE_A), base); 10976 I915_WRITE_FW(CURSIZE, size); 10977 I915_WRITE_FW(CURPOS(PIPE_A), pos); 10978 I915_WRITE_FW(CURCNTR(PIPE_A), cntl); 10979 10980 plane->cursor.base = base; 10981 plane->cursor.size = size; 10982 plane->cursor.cntl = cntl; 10983 } else { 10984 I915_WRITE_FW(CURPOS(PIPE_A), pos); 10985 } 10986 10987 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 10988 } 10989 10990 static void i845_disable_cursor(struct intel_plane *plane, 10991 const struct intel_crtc_state *crtc_state) 10992 { 10993 i845_update_cursor(plane, crtc_state, NULL); 10994 } 10995 10996 static bool i845_cursor_get_hw_state(struct intel_plane *plane, 10997 enum pipe *pipe) 10998 { 10999 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11000 enum intel_display_power_domain power_domain; 11001 intel_wakeref_t wakeref; 11002 bool ret; 11003 11004 power_domain = POWER_DOMAIN_PIPE(PIPE_A); 11005 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11006 if (!wakeref) 11007 return false; 11008 11009 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 11010 11011 *pipe = PIPE_A; 11012 11013 intel_display_power_put(dev_priv, power_domain, wakeref); 11014 11015 return ret; 11016 } 11017 11018 static unsigned int 11019 i9xx_cursor_max_stride(struct intel_plane *plane, 11020 u32 pixel_format, u64 modifier, 11021 unsigned int rotation) 11022 { 11023 return plane->base.dev->mode_config.cursor_width * 4; 11024 } 11025 11026 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 11027 { 11028 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 11029 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11030 u32 cntl = 0; 11031 11032 if (INTEL_GEN(dev_priv) >= 11) 11033 return cntl; 11034 11035 if (crtc_state->gamma_enable) 11036 cntl = MCURSOR_GAMMA_ENABLE; 11037 11038 if (crtc_state->csc_enable) 11039 cntl |= MCURSOR_PIPE_CSC_ENABLE; 11040 11041 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 11042 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 11043 11044 return cntl; 11045 } 11046 11047 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 11048 const struct intel_plane_state *plane_state) 11049 { 11050 struct drm_i915_private *dev_priv = 11051 to_i915(plane_state->base.plane->dev); 11052 u32 cntl = 0; 11053 11054 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 11055 cntl |= MCURSOR_TRICKLE_FEED_DISABLE; 11056 11057 switch (drm_rect_width(&plane_state->base.dst)) { 11058 case 64: 11059 cntl |= MCURSOR_MODE_64_ARGB_AX; 11060 break; 11061 case 128: 11062 cntl |= MCURSOR_MODE_128_ARGB_AX; 11063 break; 11064 case 256: 11065 cntl |= MCURSOR_MODE_256_ARGB_AX; 11066 break; 11067 default: 11068 MISSING_CASE(drm_rect_width(&plane_state->base.dst)); 11069 return 0; 11070 } 11071 11072 if (plane_state->base.rotation & DRM_MODE_ROTATE_180) 11073 cntl |= MCURSOR_ROTATE_180; 11074 11075 return cntl; 11076 } 11077 11078 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) 11079 { 11080 struct drm_i915_private *dev_priv = 11081 to_i915(plane_state->base.plane->dev); 11082 int width = drm_rect_width(&plane_state->base.dst); 11083 int height = drm_rect_height(&plane_state->base.dst); 11084 11085 if (!intel_cursor_size_ok(plane_state)) 11086 return false; 11087 11088 /* Cursor width is limited to a few power-of-two sizes */ 11089 switch 
(width) { 11090 case 256: 11091 case 128: 11092 case 64: 11093 break; 11094 default: 11095 return false; 11096 } 11097 11098 /* 11099 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor 11100 * height from 8 lines up to the cursor width, when the 11101 * cursor is not rotated. Everything else requires square 11102 * cursors. 11103 */ 11104 if (HAS_CUR_FBC(dev_priv) && 11105 plane_state->base.rotation & DRM_MODE_ROTATE_0) { 11106 if (height < 8 || height > width) 11107 return false; 11108 } else { 11109 if (height != width) 11110 return false; 11111 } 11112 11113 return true; 11114 } 11115 11116 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, 11117 struct intel_plane_state *plane_state) 11118 { 11119 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 11120 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11121 const struct drm_framebuffer *fb = plane_state->base.fb; 11122 enum pipe pipe = plane->pipe; 11123 int ret; 11124 11125 ret = intel_check_cursor(crtc_state, plane_state); 11126 if (ret) 11127 return ret; 11128 11129 /* if we want to turn off the cursor, ignore width and height */ 11130 if (!fb) 11131 return 0; 11132 11133 /* Check for which cursor types we support */ 11134 if (!i9xx_cursor_size_ok(plane_state)) { 11135 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 11136 drm_rect_width(&plane_state->base.dst), 11137 drm_rect_height(&plane_state->base.dst)); 11138 return -EINVAL; 11139 } 11140 11141 WARN_ON(plane_state->base.visible && 11142 plane_state->color_plane[0].stride != fb->pitches[0]); 11143 11144 if (fb->pitches[0] != 11145 drm_rect_width(&plane_state->base.dst) * fb->format->cpp[0]) { 11146 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n", 11147 fb->pitches[0], 11148 drm_rect_width(&plane_state->base.dst)); 11149 return -EINVAL; 11150 } 11151 11152 /* 11153 * There's something wrong with the cursor on CHV pipe C. 11154 * If it straddles the left edge of the screen then 11155 * moving it away from the edge or disabling it often 11156 * results in a pipe underrun, and often that can lead to 11157 * a dead pipe (constant underrun reported, and it scans 11158 * out just a solid color). To recover from that, the 11159 * display power well must be turned off and on again. 11160 * Refuse to put the cursor into that compromised position.
11161 */ 11162 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && 11163 plane_state->base.visible && plane_state->base.dst.x1 < 0) { 11164 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); 11165 return -EINVAL; 11166 } 11167 11168 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state); 11169 11170 return 0; 11171 } 11172 11173 static void i9xx_update_cursor(struct intel_plane *plane, 11174 const struct intel_crtc_state *crtc_state, 11175 const struct intel_plane_state *plane_state) 11176 { 11177 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11178 enum pipe pipe = plane->pipe; 11179 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; 11180 unsigned long irqflags; 11181 11182 if (plane_state && plane_state->base.visible) { 11183 unsigned width = drm_rect_width(&plane_state->base.dst); 11184 unsigned height = drm_rect_height(&plane_state->base.dst); 11185 11186 cntl = plane_state->ctl | 11187 i9xx_cursor_ctl_crtc(crtc_state); 11188 11189 if (width != height) 11190 fbc_ctl = CUR_FBC_CTL_EN | (height - 1); 11191 11192 base = intel_cursor_base(plane_state); 11193 pos = intel_cursor_position(plane_state); 11194 } 11195 11196 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 11197 11198 /* 11199 * On some platforms writing CURCNTR first will also 11200 * cause CURPOS to be armed by the CURBASE write. 11201 * Without the CURCNTR write the CURPOS write would 11202 * arm itself. Thus we always update CURCNTR before 11203 * CURPOS. 11204 * 11205 * On other platforms CURPOS always requires the 11206 * CURBASE write to arm the update. Additionally 11207 * a write to any of the cursor registers will cancel 11208 * an already armed cursor update. Thus leaving out 11209 * the CURBASE write after CURPOS could lead to a 11210 * cursor that doesn't appear to move, or even change 11211 * shape. Thus we always write CURBASE. 11212 * 11213 * The other registers are armed by the CURBASE write 11214 * except when the plane is getting enabled, at which time 11215 * the CURCNTR write arms the update. 11216 */ 11217 11218 if (INTEL_GEN(dev_priv) >= 9) 11219 skl_write_cursor_wm(plane, crtc_state); 11220 11221 if (plane->cursor.base != base || 11222 plane->cursor.size != fbc_ctl || 11223 plane->cursor.cntl != cntl) { 11224 if (HAS_CUR_FBC(dev_priv)) 11225 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl); 11226 I915_WRITE_FW(CURCNTR(pipe), cntl); 11227 I915_WRITE_FW(CURPOS(pipe), pos); 11228 I915_WRITE_FW(CURBASE(pipe), base); 11229 11230 plane->cursor.base = base; 11231 plane->cursor.size = fbc_ctl; 11232 plane->cursor.cntl = cntl; 11233 } else { 11234 I915_WRITE_FW(CURPOS(pipe), pos); 11235 I915_WRITE_FW(CURBASE(pipe), base); 11236 } 11237 11238 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 11239 } 11240 11241 static void i9xx_disable_cursor(struct intel_plane *plane, 11242 const struct intel_crtc_state *crtc_state) 11243 { 11244 i9xx_update_cursor(plane, crtc_state, NULL); 11245 } 11246 11247 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, 11248 enum pipe *pipe) 11249 { 11250 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11251 enum intel_display_power_domain power_domain; 11252 intel_wakeref_t wakeref; 11253 bool ret; 11254 u32 val; 11255 11256 /* 11257 * Not 100% correct for planes that can move between pipes, 11258 * but that's only the case for gen2-3 which don't have any 11259 * display power wells.
11260 */ 11261 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 11262 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11263 if (!wakeref) 11264 return false; 11265 11266 val = I915_READ(CURCNTR(plane->pipe)); 11267 11268 ret = val & MCURSOR_MODE; 11269 11270 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 11271 *pipe = plane->pipe; 11272 else 11273 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> 11274 MCURSOR_PIPE_SELECT_SHIFT; 11275 11276 intel_display_power_put(dev_priv, power_domain, wakeref); 11277 11278 return ret; 11279 } 11280 11281 /* VESA 640x480x72Hz mode to set on the pipe */ 11282 static const struct drm_display_mode load_detect_mode = { 11283 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 11284 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 11285 }; 11286 11287 struct drm_framebuffer * 11288 intel_framebuffer_create(struct drm_i915_gem_object *obj, 11289 struct drm_mode_fb_cmd2 *mode_cmd) 11290 { 11291 struct intel_framebuffer *intel_fb; 11292 int ret; 11293 11294 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 11295 if (!intel_fb) 11296 return ERR_PTR(-ENOMEM); 11297 11298 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); 11299 if (ret) 11300 goto err; 11301 11302 return &intel_fb->base; 11303 11304 err: 11305 kfree(intel_fb); 11306 return ERR_PTR(ret); 11307 } 11308 11309 static int intel_modeset_disable_planes(struct drm_atomic_state *state, 11310 struct drm_crtc *crtc) 11311 { 11312 struct drm_plane *plane; 11313 struct drm_plane_state *plane_state; 11314 int ret, i; 11315 11316 ret = drm_atomic_add_affected_planes(state, crtc); 11317 if (ret) 11318 return ret; 11319 11320 for_each_new_plane_in_state(state, plane, plane_state, i) { 11321 if (plane_state->crtc != crtc) 11322 continue; 11323 11324 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 11325 if (ret) 11326 return ret; 11327 11328 drm_atomic_set_fb_for_plane(plane_state, NULL); 11329 } 11330 11331 return 0; 11332 } 11333 11334 int intel_get_load_detect_pipe(struct drm_connector *connector, 11335 struct intel_load_detect_pipe *old, 11336 struct drm_modeset_acquire_ctx *ctx) 11337 { 11338 struct intel_crtc *intel_crtc; 11339 struct intel_encoder *intel_encoder = 11340 intel_attached_encoder(connector); 11341 struct drm_crtc *possible_crtc; 11342 struct drm_encoder *encoder = &intel_encoder->base; 11343 struct drm_crtc *crtc = NULL; 11344 struct drm_device *dev = encoder->dev; 11345 struct drm_i915_private *dev_priv = to_i915(dev); 11346 struct drm_mode_config *config = &dev->mode_config; 11347 struct drm_atomic_state *state = NULL, *restore_state = NULL; 11348 struct drm_connector_state *connector_state; 11349 struct intel_crtc_state *crtc_state; 11350 int ret, i = -1; 11351 11352 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11353 connector->base.id, connector->name, 11354 encoder->base.id, encoder->name); 11355 11356 old->restore_state = NULL; 11357 11358 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); 11359 11360 /* 11361 * Algorithm gets a little messy: 11362 * 11363 * - if the connector already has an assigned crtc, use it (but make 11364 * sure it's on first) 11365 * 11366 * - try to find the first unused crtc that can drive this connector, 11367 * and use that if we find one 11368 */ 11369 11370 /* See if we already have a CRTC for this connector */ 11371 if (connector->state->crtc) { 11372 crtc = connector->state->crtc; 11373 11374 ret = drm_modeset_lock(&crtc->mutex, ctx); 11375 if (ret) 11376 goto fail; 11377 
11378 /* Make sure the crtc and connector are running */ 11379 goto found; 11380 } 11381 11382 /* Find an unused one (if possible) */ 11383 for_each_crtc(dev, possible_crtc) { 11384 i++; 11385 if (!(encoder->possible_crtcs & (1 << i))) 11386 continue; 11387 11388 ret = drm_modeset_lock(&possible_crtc->mutex, ctx); 11389 if (ret) 11390 goto fail; 11391 11392 if (possible_crtc->state->enable) { 11393 drm_modeset_unlock(&possible_crtc->mutex); 11394 continue; 11395 } 11396 11397 crtc = possible_crtc; 11398 break; 11399 } 11400 11401 /* 11402 * If we didn't find an unused CRTC, don't use any. 11403 */ 11404 if (!crtc) { 11405 DRM_DEBUG_KMS("no pipe available for load-detect\n"); 11406 ret = -ENODEV; 11407 goto fail; 11408 } 11409 11410 found: 11411 intel_crtc = to_intel_crtc(crtc); 11412 11413 state = drm_atomic_state_alloc(dev); 11414 restore_state = drm_atomic_state_alloc(dev); 11415 if (!state || !restore_state) { 11416 ret = -ENOMEM; 11417 goto fail; 11418 } 11419 11420 state->acquire_ctx = ctx; 11421 restore_state->acquire_ctx = ctx; 11422 11423 connector_state = drm_atomic_get_connector_state(state, connector); 11424 if (IS_ERR(connector_state)) { 11425 ret = PTR_ERR(connector_state); 11426 goto fail; 11427 } 11428 11429 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); 11430 if (ret) 11431 goto fail; 11432 11433 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 11434 if (IS_ERR(crtc_state)) { 11435 ret = PTR_ERR(crtc_state); 11436 goto fail; 11437 } 11438 11439 crtc_state->base.active = crtc_state->base.enable = true; 11440 11441 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, 11442 &load_detect_mode); 11443 if (ret) 11444 goto fail; 11445 11446 ret = intel_modeset_disable_planes(state, crtc); 11447 if (ret) 11448 goto fail; 11449 11450 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 11451 if (!ret) 11452 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); 11453 if (!ret) 11454 ret = drm_atomic_add_affected_planes(restore_state, crtc); 11455 if (ret) { 11456 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret); 11457 goto fail; 11458 } 11459 11460 ret = drm_atomic_commit(state); 11461 if (ret) { 11462 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 11463 goto fail; 11464 } 11465 11466 old->restore_state = restore_state; 11467 drm_atomic_state_put(state); 11468 11469 /* let the connector get through one full cycle before testing */ 11470 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 11471 return true; 11472 11473 fail: 11474 if (state) { 11475 drm_atomic_state_put(state); 11476 state = NULL; 11477 } 11478 if (restore_state) { 11479 drm_atomic_state_put(restore_state); 11480 restore_state = NULL; 11481 } 11482 11483 if (ret == -EDEADLK) 11484 return ret; 11485 11486 return false; 11487 } 11488 11489 void intel_release_load_detect_pipe(struct drm_connector *connector, 11490 struct intel_load_detect_pipe *old, 11491 struct drm_modeset_acquire_ctx *ctx) 11492 { 11493 struct intel_encoder *intel_encoder = 11494 intel_attached_encoder(connector); 11495 struct drm_encoder *encoder = &intel_encoder->base; 11496 struct drm_atomic_state *state = old->restore_state; 11497 int ret; 11498 11499 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11500 connector->base.id, connector->name, 11501 encoder->base.id, encoder->name); 11502 11503 if (!state) 11504 return; 11505 11506 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 11507 if (ret) 11508 
DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret); 11509 drm_atomic_state_put(state); 11510 } 11511 11512 static int i9xx_pll_refclk(struct drm_device *dev, 11513 const struct intel_crtc_state *pipe_config) 11514 { 11515 struct drm_i915_private *dev_priv = to_i915(dev); 11516 u32 dpll = pipe_config->dpll_hw_state.dpll; 11517 11518 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 11519 return dev_priv->vbt.lvds_ssc_freq; 11520 else if (HAS_PCH_SPLIT(dev_priv)) 11521 return 120000; 11522 else if (!IS_GEN(dev_priv, 2)) 11523 return 96000; 11524 else 11525 return 48000; 11526 } 11527 11528 /* Returns the clock of the currently programmed mode of the given pipe. */ 11529 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 11530 struct intel_crtc_state *pipe_config) 11531 { 11532 struct drm_device *dev = crtc->base.dev; 11533 struct drm_i915_private *dev_priv = to_i915(dev); 11534 enum pipe pipe = crtc->pipe; 11535 u32 dpll = pipe_config->dpll_hw_state.dpll; 11536 u32 fp; 11537 struct dpll clock; 11538 int port_clock; 11539 int refclk = i9xx_pll_refclk(dev, pipe_config); 11540 11541 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 11542 fp = pipe_config->dpll_hw_state.fp0; 11543 else 11544 fp = pipe_config->dpll_hw_state.fp1; 11545 11546 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 11547 if (IS_PINEVIEW(dev_priv)) { 11548 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 11549 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 11550 } else { 11551 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 11552 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 11553 } 11554 11555 if (!IS_GEN(dev_priv, 2)) { 11556 if (IS_PINEVIEW(dev_priv)) 11557 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 11558 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 11559 else 11560 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 11561 DPLL_FPA01_P1_POST_DIV_SHIFT); 11562 11563 switch (dpll & DPLL_MODE_MASK) { 11564 case DPLLB_MODE_DAC_SERIAL: 11565 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 11566 5 : 10; 11567 break; 11568 case DPLLB_MODE_LVDS: 11569 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 11570 7 : 14; 11571 break; 11572 default: 11573 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 11574 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 11575 return; 11576 } 11577 11578 if (IS_PINEVIEW(dev_priv)) 11579 port_clock = pnv_calc_dpll_params(refclk, &clock); 11580 else 11581 port_clock = i9xx_calc_dpll_params(refclk, &clock); 11582 } else { 11583 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS); 11584 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 11585 11586 if (is_lvds) { 11587 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 11588 DPLL_FPA01_P1_POST_DIV_SHIFT); 11589 11590 if (lvds & LVDS_CLKB_POWER_UP) 11591 clock.p2 = 7; 11592 else 11593 clock.p2 = 14; 11594 } else { 11595 if (dpll & PLL_P1_DIVIDE_BY_TWO) 11596 clock.p1 = 2; 11597 else { 11598 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 11599 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 11600 } 11601 if (dpll & PLL_P2_DIVIDE_BY_4) 11602 clock.p2 = 4; 11603 else 11604 clock.p2 = 2; 11605 } 11606 11607 port_clock = i9xx_calc_dpll_params(refclk, &clock); 11608 } 11609 11610 /* 11611 * This value includes pixel_multiplier. We will use 11612 * port_clock to compute adjusted_mode.crtc_clock in the 11613 * encoder's get_config() function. 
11614 */ 11615 pipe_config->port_clock = port_clock; 11616 } 11617 11618 int intel_dotclock_calculate(int link_freq, 11619 const struct intel_link_m_n *m_n) 11620 { 11621 /* 11622 * The calculation for the data clock is: 11623 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 11624 * But we want to avoid losing precision if possible, so: 11625 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 11626 * 11627 * and from the link m/n values it is simpler: 11628 * pixel_clock = (m * link_clock) / n 11629 */ 11630 11631 if (!m_n->link_n) 11632 return 0; 11633 11634 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); 11635 } 11636 11637 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 11638 struct intel_crtc_state *pipe_config) 11639 { 11640 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11641 11642 /* read out port_clock from the DPLL */ 11643 i9xx_crtc_clock_get(crtc, pipe_config); 11644 11645 /* 11646 * In case there is an active pipe without active ports, 11647 * we may need some idea for the dotclock anyway. 11648 * Calculate one based on the FDI configuration. 11649 */ 11650 pipe_config->base.adjusted_mode.crtc_clock = 11651 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 11652 &pipe_config->fdi_m_n); 11653 } 11654 11655 /* Returns the currently programmed mode of the given encoder. */ 11656 struct drm_display_mode * 11657 intel_encoder_current_mode(struct intel_encoder *encoder) 11658 { 11659 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 11660 struct intel_crtc_state *crtc_state; 11661 struct drm_display_mode *mode; 11662 struct intel_crtc *crtc; 11663 enum pipe pipe; 11664 11665 if (!encoder->get_hw_state(encoder, &pipe)) 11666 return NULL; 11667 11668 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 11669 11670 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 11671 if (!mode) 11672 return NULL; 11673 11674 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); 11675 if (!crtc_state) { 11676 kfree(mode); 11677 return NULL; 11678 } 11679 11680 crtc_state->base.crtc = &crtc->base; 11681 11682 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) { 11683 kfree(crtc_state); 11684 kfree(mode); 11685 return NULL; 11686 } 11687 11688 encoder->get_config(encoder, crtc_state); 11689 11690 intel_mode_from_pipe_config(mode, crtc_state); 11691 11692 kfree(crtc_state); 11693 11694 return mode; 11695 } 11696 11697 static void intel_crtc_destroy(struct drm_crtc *crtc) 11698 { 11699 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11700 11701 drm_crtc_cleanup(crtc); 11702 kfree(intel_crtc); 11703 } 11704 11705 /** 11706 * intel_wm_need_update - Check whether watermarks need updating 11707 * @cur: current plane state 11708 * @new: new plane state 11709 * 11710 * Check current plane state versus the new one to determine whether 11711 * watermarks need to be recalculated. 11712 * 11713 * Returns true if the watermarks need to be recalculated, false otherwise. 11714 */ 11715 static bool intel_wm_need_update(const struct intel_plane_state *cur, 11716 struct intel_plane_state *new) 11717 { 11718 /* Update watermarks on tiling or size changes.
*/ 11719 if (new->base.visible != cur->base.visible) 11720 return true; 11721 11722 if (!cur->base.fb || !new->base.fb) 11723 return false; 11724 11725 if (cur->base.fb->modifier != new->base.fb->modifier || 11726 cur->base.rotation != new->base.rotation || 11727 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) || 11728 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) || 11729 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) || 11730 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst)) 11731 return true; 11732 11733 return false; 11734 } 11735 11736 static bool needs_scaling(const struct intel_plane_state *state) 11737 { 11738 int src_w = drm_rect_width(&state->base.src) >> 16; 11739 int src_h = drm_rect_height(&state->base.src) >> 16; 11740 int dst_w = drm_rect_width(&state->base.dst); 11741 int dst_h = drm_rect_height(&state->base.dst); 11742 11743 return (src_w != dst_w || src_h != dst_h); 11744 } 11745 11746 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, 11747 struct intel_crtc_state *crtc_state, 11748 const struct intel_plane_state *old_plane_state, 11749 struct intel_plane_state *plane_state) 11750 { 11751 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 11752 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 11753 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11754 bool mode_changed = needs_modeset(crtc_state); 11755 bool was_crtc_enabled = old_crtc_state->base.active; 11756 bool is_crtc_enabled = crtc_state->base.active; 11757 bool turn_off, turn_on, visible, was_visible; 11758 int ret; 11759 11760 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { 11761 ret = skl_update_scaler_plane(crtc_state, plane_state); 11762 if (ret) 11763 return ret; 11764 } 11765 11766 was_visible = old_plane_state->base.visible; 11767 visible = plane_state->base.visible; 11768 11769 if (!was_crtc_enabled && WARN_ON(was_visible)) 11770 was_visible = false; 11771 11772 /* 11773 * Visibility is calculated as if the crtc was on, but 11774 * after scaler setup everything depends on it being off 11775 * when the crtc isn't active. 11776 * 11777 * FIXME this is wrong for watermarks. Watermarks should also 11778 * be computed as if the pipe would be active. Perhaps move 11779 * per-plane wm computation to the .check_plane() hook, and 11780 * only combine the results from all planes in the current place? 
11781 */ 11782 if (!is_crtc_enabled) { 11783 plane_state->base.visible = visible = false; 11784 crtc_state->active_planes &= ~BIT(plane->id); 11785 crtc_state->data_rate[plane->id] = 0; 11786 crtc_state->min_cdclk[plane->id] = 0; 11787 } 11788 11789 if (!was_visible && !visible) 11790 return 0; 11791 11792 turn_off = was_visible && (!visible || mode_changed); 11793 turn_on = visible && (!was_visible || mode_changed); 11794 11795 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", 11796 crtc->base.base.id, crtc->base.name, 11797 plane->base.base.id, plane->base.name, 11798 was_visible, visible, 11799 turn_off, turn_on, mode_changed); 11800 11801 if (turn_on) { 11802 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 11803 crtc_state->update_wm_pre = true; 11804 11805 /* must disable cxsr around plane enable/disable */ 11806 if (plane->id != PLANE_CURSOR) 11807 crtc_state->disable_cxsr = true; 11808 } else if (turn_off) { 11809 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 11810 crtc_state->update_wm_post = true; 11811 11812 /* must disable cxsr around plane enable/disable */ 11813 if (plane->id != PLANE_CURSOR) 11814 crtc_state->disable_cxsr = true; 11815 } else if (intel_wm_need_update(old_plane_state, plane_state)) { 11816 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { 11817 /* FIXME bollocks */ 11818 crtc_state->update_wm_pre = true; 11819 crtc_state->update_wm_post = true; 11820 } 11821 } 11822 11823 if (visible || was_visible) 11824 crtc_state->fb_bits |= plane->frontbuffer_bit; 11825 11826 /* 11827 * ILK/SNB DVSACNTR/Sprite Enable 11828 * IVB SPR_CTL/Sprite Enable 11829 * "When in Self Refresh Big FIFO mode, a write to enable the 11830 * plane will be internally buffered and delayed while Big FIFO 11831 * mode is exiting." 11832 * 11833 * Which means that enabling the sprite can take an extra frame 11834 * when we start in big FIFO mode (LP1+). Thus we need to drop 11835 * down to LP0 and wait for vblank in order to make sure the 11836 * sprite gets enabled on the next vblank after the register write. 11837 * Doing otherwise would risk enabling the sprite one frame after 11838 * we've already signalled flip completion. We can resume LP1+ 11839 * once the sprite has been enabled. 11840 * 11841 * 11842 * WaCxSRDisabledForSpriteScaling:ivb 11843 * IVB SPR_SCALE/Scaling Enable 11844 * "Low Power watermarks must be disabled for at least one 11845 * frame before enabling sprite scaling, and kept disabled 11846 * until sprite scaling is disabled." 11847 * 11848 * ILK/SNB DVSASCALE/Scaling Enable 11849 * "When in Self Refresh Big FIFO mode, scaling enable will be 11850 * masked off while Big FIFO mode is exiting." 11851 * 11852 * Despite the w/a only being listed for IVB we assume that 11853 * the ILK/SNB note has similar ramifications, hence we apply 11854 * the w/a on all three platforms. 11855 * 11856 * Experimental results suggest this is needed also for the primary 11857 * plane, not only the sprite plane.
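*
* In short: on ILK/SNB/IVB, turning a non-cursor plane on, or
* enabling scaling on one, forces LP1+ watermarks off for a frame
* via disable_lp_wm below.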
11858 */ 11859 if (plane->id != PLANE_CURSOR && 11860 (IS_GEN_RANGE(dev_priv, 5, 6) || 11861 IS_IVYBRIDGE(dev_priv)) && 11862 (turn_on || (!needs_scaling(old_plane_state) && 11863 needs_scaling(plane_state)))) 11864 crtc_state->disable_lp_wm = true; 11865 11866 return 0; 11867 } 11868 11869 static bool encoders_cloneable(const struct intel_encoder *a, 11870 const struct intel_encoder *b) 11871 { 11872 /* masks could be asymmetric, so check both ways */ 11873 return a == b || (a->cloneable & (1 << b->type) && 11874 b->cloneable & (1 << a->type)); 11875 } 11876 11877 static bool check_single_encoder_cloning(struct drm_atomic_state *state, 11878 struct intel_crtc *crtc, 11879 struct intel_encoder *encoder) 11880 { 11881 struct intel_encoder *source_encoder; 11882 struct drm_connector *connector; 11883 struct drm_connector_state *connector_state; 11884 int i; 11885 11886 for_each_new_connector_in_state(state, connector, connector_state, i) { 11887 if (connector_state->crtc != &crtc->base) 11888 continue; 11889 11890 source_encoder = 11891 to_intel_encoder(connector_state->best_encoder); 11892 if (!encoders_cloneable(encoder, source_encoder)) 11893 return false; 11894 } 11895 11896 return true; 11897 } 11898 11899 static int icl_add_linked_planes(struct intel_atomic_state *state) 11900 { 11901 struct intel_plane *plane, *linked; 11902 struct intel_plane_state *plane_state, *linked_plane_state; 11903 int i; 11904 11905 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 11906 linked = plane_state->planar_linked_plane; 11907 11908 if (!linked) 11909 continue; 11910 11911 linked_plane_state = intel_atomic_get_plane_state(state, linked); 11912 if (IS_ERR(linked_plane_state)) 11913 return PTR_ERR(linked_plane_state); 11914 11915 WARN_ON(linked_plane_state->planar_linked_plane != plane); 11916 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave); 11917 } 11918 11919 return 0; 11920 } 11921 11922 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) 11923 { 11924 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 11925 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11926 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state); 11927 struct intel_plane *plane, *linked; 11928 struct intel_plane_state *plane_state; 11929 int i; 11930 11931 if (INTEL_GEN(dev_priv) < 11) 11932 return 0; 11933 11934 /* 11935 * Destroy all old plane links and make the slave plane invisible 11936 * in the crtc_state->active_planes mask. 
11937 */ 11938 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 11939 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) 11940 continue; 11941 11942 plane_state->planar_linked_plane = NULL; 11943 if (plane_state->planar_slave && !plane_state->base.visible) { 11944 crtc_state->active_planes &= ~BIT(plane->id); 11945 crtc_state->update_planes |= BIT(plane->id); 11946 } 11947 11948 plane_state->planar_slave = false; 11949 } 11950 11951 if (!crtc_state->nv12_planes) 11952 return 0; 11953 11954 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 11955 struct intel_plane_state *linked_state = NULL; 11956 11957 if (plane->pipe != crtc->pipe || 11958 !(crtc_state->nv12_planes & BIT(plane->id))) 11959 continue; 11960 11961 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 11962 if (!icl_is_nv12_y_plane(linked->id)) 11963 continue; 11964 11965 if (crtc_state->active_planes & BIT(linked->id)) 11966 continue; 11967 11968 linked_state = intel_atomic_get_plane_state(state, linked); 11969 if (IS_ERR(linked_state)) 11970 return PTR_ERR(linked_state); 11971 11972 break; 11973 } 11974 11975 if (!linked_state) { 11976 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n", 11977 hweight8(crtc_state->nv12_planes)); 11978 11979 return -EINVAL; 11980 } 11981 11982 plane_state->planar_linked_plane = linked; 11983 11984 linked_state->planar_slave = true; 11985 linked_state->planar_linked_plane = plane; 11986 crtc_state->active_planes |= BIT(linked->id); 11987 crtc_state->update_planes |= BIT(linked->id); 11988 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); 11989 } 11990 11991 return 0; 11992 } 11993 11994 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) 11995 { 11996 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 11997 struct intel_atomic_state *state = 11998 to_intel_atomic_state(new_crtc_state->base.state); 11999 const struct intel_crtc_state *old_crtc_state = 12000 intel_atomic_get_old_crtc_state(state, crtc); 12001 12002 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; 12003 } 12004 12005 static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state) 12006 { 12007 struct drm_crtc *crtc = crtc_state->base.crtc; 12008 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state); 12009 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 12010 struct drm_connector *master_connector, *connector; 12011 struct drm_connector_state *connector_state; 12012 struct drm_connector_list_iter conn_iter; 12013 struct drm_crtc *master_crtc = NULL; 12014 struct drm_crtc_state *master_crtc_state; 12015 struct intel_crtc_state *master_pipe_config; 12016 int i, tile_group_id; 12017 12018 if (INTEL_GEN(dev_priv) < 11) 12019 return 0; 12020 12021 /* 12022 * In case of tiled displays there could be one or more slaves but there is 12023 * only one master. Let's make the CRTC used by the connector corresponding 12024 * to the last horizontal and last vertical tile a master/genlock CRTC. 12025 * All the other CRTCs corresponding to other tiles of the same Tile group 12026 * are the slave CRTCs and hold a pointer to their genlock CRTC.
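*
* Illustrative example: in a hypothetical two-tile display whose
* tiles sit side by side, the CRTC driving the right-hand tile
* (tile_h_loc == num_h_tile - 1, tile_v_loc == num_v_tile - 1)
* becomes the master, and the left tile's CRTC is marked as its
* slave via master_transcoder/sync_mode_slaves_mask.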
12027 */ 12028 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 12029 if (connector_state->crtc != crtc) 12030 continue; 12031 if (!connector->has_tile) 12032 continue; 12033 if (crtc_state->base.mode.hdisplay != connector->tile_h_size || 12034 crtc_state->base.mode.vdisplay != connector->tile_v_size) 12035 return 0; 12036 if (connector->tile_h_loc == connector->num_h_tile - 1 && 12037 connector->tile_v_loc == connector->num_v_tile - 1) 12038 continue; 12039 crtc_state->sync_mode_slaves_mask = 0; 12040 tile_group_id = connector->tile_group->id; 12041 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 12042 drm_for_each_connector_iter(master_connector, &conn_iter) { 12043 struct drm_connector_state *master_conn_state = NULL; 12044 12045 if (!master_connector->has_tile) 12046 continue; 12047 if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 || 12048 master_connector->tile_v_loc != master_connector->num_v_tile - 1) 12049 continue; 12050 if (master_connector->tile_group->id != tile_group_id) 12051 continue; 12052 12053 master_conn_state = drm_atomic_get_connector_state(&state->base, 12054 master_connector); 12055 if (IS_ERR(master_conn_state)) { 12056 drm_connector_list_iter_end(&conn_iter); 12057 return PTR_ERR(master_conn_state); 12058 } 12059 if (master_conn_state->crtc) { 12060 master_crtc = master_conn_state->crtc; 12061 break; 12062 } 12063 } 12064 drm_connector_list_iter_end(&conn_iter); 12065 12066 if (!master_crtc) { 12067 DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n", 12068 connector_state->crtc->base.id); 12069 return -EINVAL; 12070 } 12071 12072 master_crtc_state = drm_atomic_get_crtc_state(&state->base, 12073 master_crtc); 12074 if (IS_ERR(master_crtc_state)) 12075 return PTR_ERR(master_crtc_state); 12076 12077 master_pipe_config = to_intel_crtc_state(master_crtc_state); 12078 crtc_state->master_transcoder = master_pipe_config->cpu_transcoder; 12079 master_pipe_config->sync_mode_slaves_mask |= 12080 BIT(crtc_state->cpu_transcoder); 12081 DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n", 12082 transcoder_name(crtc_state->master_transcoder), 12083 crtc_state->base.crtc->base.id, 12084 master_pipe_config->sync_mode_slaves_mask); 12085 } 12086 12087 return 0; 12088 } 12089 12090 static int intel_crtc_atomic_check(struct intel_atomic_state *state, 12091 struct intel_crtc *crtc) 12092 { 12093 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12094 struct intel_crtc_state *crtc_state = 12095 intel_atomic_get_new_crtc_state(state, crtc); 12096 bool mode_changed = needs_modeset(crtc_state); 12097 int ret; 12098 12099 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && 12100 mode_changed && !crtc_state->base.active) 12101 crtc_state->update_wm_post = true; 12102 12103 if (mode_changed && crtc_state->base.enable && 12104 dev_priv->display.crtc_compute_clock && 12105 !WARN_ON(crtc_state->shared_dpll)) { 12106 ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state); 12107 if (ret) 12108 return ret; 12109 } 12110 12111 /* 12112 * May need to update pipe gamma enable bits 12113 * when C8 planes are getting enabled/disabled. 
12114 */ 12115 if (c8_planes_changed(crtc_state)) 12116 crtc_state->base.color_mgmt_changed = true; 12117 12118 if (mode_changed || crtc_state->update_pipe || 12119 crtc_state->base.color_mgmt_changed) { 12120 ret = intel_color_check(crtc_state); 12121 if (ret) 12122 return ret; 12123 } 12124 12125 ret = 0; 12126 if (dev_priv->display.compute_pipe_wm) { 12127 ret = dev_priv->display.compute_pipe_wm(crtc_state); 12128 if (ret) { 12129 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n"); 12130 return ret; 12131 } 12132 } 12133 12134 if (dev_priv->display.compute_intermediate_wm) { 12135 if (WARN_ON(!dev_priv->display.compute_pipe_wm)) 12136 return 0; 12137 12138 /* 12139 * Calculate 'intermediate' watermarks that satisfy both the 12140 * old state and the new state. We can program these 12141 * immediately. 12142 */ 12143 ret = dev_priv->display.compute_intermediate_wm(crtc_state); 12144 if (ret) { 12145 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); 12146 return ret; 12147 } 12148 } 12149 12150 if (INTEL_GEN(dev_priv) >= 9) { 12151 if (mode_changed || crtc_state->update_pipe) 12152 ret = skl_update_scaler_crtc(crtc_state); 12153 if (!ret) 12154 ret = intel_atomic_setup_scalers(dev_priv, crtc, 12155 crtc_state); 12156 } 12157 12158 if (HAS_IPS(dev_priv)) 12159 crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state); 12160 12161 return ret; 12162 } 12163 12164 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 12165 { 12166 struct intel_connector *connector; 12167 struct drm_connector_list_iter conn_iter; 12168 12169 drm_connector_list_iter_begin(dev, &conn_iter); 12170 for_each_intel_connector_iter(connector, &conn_iter) { 12171 if (connector->base.state->crtc) 12172 drm_connector_put(&connector->base); 12173 12174 if (connector->base.encoder) { 12175 connector->base.state->best_encoder = 12176 connector->base.encoder; 12177 connector->base.state->crtc = 12178 connector->base.encoder->crtc; 12179 12180 drm_connector_get(&connector->base); 12181 } else { 12182 connector->base.state->best_encoder = NULL; 12183 connector->base.state->crtc = NULL; 12184 } 12185 } 12186 drm_connector_list_iter_end(&conn_iter); 12187 } 12188 12189 static int 12190 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 12191 struct intel_crtc_state *pipe_config) 12192 { 12193 struct drm_connector *connector = conn_state->connector; 12194 const struct drm_display_info *info = &connector->display_info; 12195 int bpp; 12196 12197 switch (conn_state->max_bpc) { 12198 case 6 ... 7: 12199 bpp = 6 * 3; 12200 break; 12201 case 8 ... 9: 12202 bpp = 8 * 3; 12203 break; 12204 case 10 ... 
11: 12205 bpp = 10 * 3; 12206 break; 12207 case 12: 12208 bpp = 12 * 3; 12209 break; 12210 default: 12211 return -EINVAL; 12212 } 12213 12214 if (bpp < pipe_config->pipe_bpp) { 12215 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of " 12216 "EDID bpp %d, requested bpp %d, max platform bpp %d\n", 12217 connector->base.id, connector->name, 12218 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc, 12219 pipe_config->pipe_bpp); 12220 12221 pipe_config->pipe_bpp = bpp; 12222 } 12223 12224 return 0; 12225 } 12226 12227 static int 12228 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 12229 struct intel_crtc_state *pipe_config) 12230 { 12231 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12232 struct drm_atomic_state *state = pipe_config->base.state; 12233 struct drm_connector *connector; 12234 struct drm_connector_state *connector_state; 12235 int bpp, i; 12236 12237 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 12238 IS_CHERRYVIEW(dev_priv))) 12239 bpp = 10*3; 12240 else if (INTEL_GEN(dev_priv) >= 5) 12241 bpp = 12*3; 12242 else 12243 bpp = 8*3; 12244 12245 pipe_config->pipe_bpp = bpp; 12246 12247 /* Clamp display bpp to connector max bpp */ 12248 for_each_new_connector_in_state(state, connector, connector_state, i) { 12249 int ret; 12250 12251 if (connector_state->crtc != &crtc->base) 12252 continue; 12253 12254 ret = compute_sink_pipe_bpp(connector_state, pipe_config); 12255 if (ret) 12256 return ret; 12257 } 12258 12259 return 0; 12260 } 12261 12262 static void intel_dump_crtc_timings(const struct drm_display_mode *mode) 12263 { 12264 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " 12265 "type: 0x%x flags: 0x%x\n", 12266 mode->crtc_clock, 12267 mode->crtc_hdisplay, mode->crtc_hsync_start, 12268 mode->crtc_hsync_end, mode->crtc_htotal, 12269 mode->crtc_vdisplay, mode->crtc_vsync_start, 12270 mode->crtc_vsync_end, mode->crtc_vtotal, 12271 mode->type, mode->flags); 12272 } 12273 12274 static inline void 12275 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, 12276 const char *id, unsigned int lane_count, 12277 const struct intel_link_m_n *m_n) 12278 { 12279 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 12280 id, lane_count, 12281 m_n->gmch_m, m_n->gmch_n, 12282 m_n->link_m, m_n->link_n, m_n->tu); 12283 } 12284 12285 static void 12286 intel_dump_infoframe(struct drm_i915_private *dev_priv, 12287 const union hdmi_infoframe *frame) 12288 { 12289 if (!drm_debug_enabled(DRM_UT_KMS)) 12290 return; 12291 12292 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame); 12293 } 12294 12295 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x 12296 12297 static const char * const output_type_str[] = { 12298 OUTPUT_TYPE(UNUSED), 12299 OUTPUT_TYPE(ANALOG), 12300 OUTPUT_TYPE(DVO), 12301 OUTPUT_TYPE(SDVO), 12302 OUTPUT_TYPE(LVDS), 12303 OUTPUT_TYPE(TVOUT), 12304 OUTPUT_TYPE(HDMI), 12305 OUTPUT_TYPE(DP), 12306 OUTPUT_TYPE(EDP), 12307 OUTPUT_TYPE(DSI), 12308 OUTPUT_TYPE(DDI), 12309 OUTPUT_TYPE(DP_MST), 12310 }; 12311 12312 #undef OUTPUT_TYPE 12313 12314 static void snprintf_output_types(char *buf, size_t len, 12315 unsigned int output_types) 12316 { 12317 char *str = buf; 12318 int i; 12319 12320 str[0] = '\0'; 12321 12322 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) { 12323 int r; 12324 12325 if ((output_types & BIT(i)) == 0) 12326 continue; 12327 12328 r = snprintf(str, len, "%s%s", 12329 str != buf ? 
"," : "", output_type_str[i]); 12330 if (r >= len) 12331 break; 12332 str += r; 12333 len -= r; 12334 12335 output_types &= ~BIT(i); 12336 } 12337 12338 WARN_ON_ONCE(output_types != 0); 12339 } 12340 12341 static const char * const output_format_str[] = { 12342 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid", 12343 [INTEL_OUTPUT_FORMAT_RGB] = "RGB", 12344 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", 12345 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", 12346 }; 12347 12348 static const char *output_formats(enum intel_output_format format) 12349 { 12350 if (format >= ARRAY_SIZE(output_format_str)) 12351 format = INTEL_OUTPUT_FORMAT_INVALID; 12352 return output_format_str[format]; 12353 } 12354 12355 static void intel_dump_plane_state(const struct intel_plane_state *plane_state) 12356 { 12357 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 12358 const struct drm_framebuffer *fb = plane_state->base.fb; 12359 struct drm_format_name_buf format_name; 12360 12361 if (!fb) { 12362 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n", 12363 plane->base.base.id, plane->base.name, 12364 yesno(plane_state->base.visible)); 12365 return; 12366 } 12367 12368 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n", 12369 plane->base.base.id, plane->base.name, 12370 fb->base.id, fb->width, fb->height, 12371 drm_get_format_name(fb->format->format, &format_name), 12372 yesno(plane_state->base.visible)); 12373 DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n", 12374 plane_state->base.rotation, plane_state->scaler_id); 12375 if (plane_state->base.visible) 12376 DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", 12377 DRM_RECT_FP_ARG(&plane_state->base.src), 12378 DRM_RECT_ARG(&plane_state->base.dst)); 12379 } 12380 12381 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, 12382 struct intel_atomic_state *state, 12383 const char *context) 12384 { 12385 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); 12386 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12387 const struct intel_plane_state *plane_state; 12388 struct intel_plane *plane; 12389 char buf[64]; 12390 int i; 12391 12392 DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n", 12393 crtc->base.base.id, crtc->base.name, 12394 yesno(pipe_config->base.enable), context); 12395 12396 if (!pipe_config->base.enable) 12397 goto dump_planes; 12398 12399 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); 12400 DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n", 12401 yesno(pipe_config->base.active), 12402 buf, pipe_config->output_types, 12403 output_formats(pipe_config->output_format)); 12404 12405 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", 12406 transcoder_name(pipe_config->cpu_transcoder), 12407 pipe_config->pipe_bpp, pipe_config->dither); 12408 12409 if (pipe_config->has_pch_encoder) 12410 intel_dump_m_n_config(pipe_config, "fdi", 12411 pipe_config->fdi_lanes, 12412 &pipe_config->fdi_m_n); 12413 12414 if (intel_crtc_has_dp_encoder(pipe_config)) { 12415 intel_dump_m_n_config(pipe_config, "dp m_n", 12416 pipe_config->lane_count, &pipe_config->dp_m_n); 12417 if (pipe_config->has_drrs) 12418 intel_dump_m_n_config(pipe_config, "dp m2_n2", 12419 pipe_config->lane_count, 12420 &pipe_config->dp_m2_n2); 12421 } 12422 12423 DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", 12424 pipe_config->has_audio, pipe_config->has_infoframe, 12425 pipe_config->infoframes.enable); 12426 12427 if 
(pipe_config->infoframes.enable & 12428 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) 12429 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp); 12430 if (pipe_config->infoframes.enable & 12431 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) 12432 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi); 12433 if (pipe_config->infoframes.enable & 12434 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD)) 12435 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd); 12436 if (pipe_config->infoframes.enable & 12437 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) 12438 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi); 12439 12440 DRM_DEBUG_KMS("requested mode:\n"); 12441 drm_mode_debug_printmodeline(&pipe_config->base.mode); 12442 DRM_DEBUG_KMS("adjusted mode:\n"); 12443 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode); 12444 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode); 12445 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n", 12446 pipe_config->port_clock, 12447 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 12448 pipe_config->pixel_rate); 12449 12450 if (INTEL_GEN(dev_priv) >= 9) 12451 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", 12452 crtc->num_scalers, 12453 pipe_config->scaler_state.scaler_users, 12454 pipe_config->scaler_state.scaler_id); 12455 12456 if (HAS_GMCH(dev_priv)) 12457 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 12458 pipe_config->gmch_pfit.control, 12459 pipe_config->gmch_pfit.pgm_ratios, 12460 pipe_config->gmch_pfit.lvds_border_bits); 12461 else 12462 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n", 12463 pipe_config->pch_pfit.pos, 12464 pipe_config->pch_pfit.size, 12465 enableddisabled(pipe_config->pch_pfit.enabled), 12466 yesno(pipe_config->pch_pfit.force_thru)); 12467 12468 DRM_DEBUG_KMS("ips: %i, double wide: %i\n", 12469 pipe_config->ips_enabled, pipe_config->double_wide); 12470 12471 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); 12472 12473 if (IS_CHERRYVIEW(dev_priv)) 12474 DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 12475 pipe_config->cgm_mode, pipe_config->gamma_mode, 12476 pipe_config->gamma_enable, pipe_config->csc_enable); 12477 else 12478 DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 12479 pipe_config->csc_mode, pipe_config->gamma_mode, 12480 pipe_config->gamma_enable, pipe_config->csc_enable); 12481 12482 dump_planes: 12483 if (!state) 12484 return; 12485 12486 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12487 if (plane->pipe == crtc->pipe) 12488 intel_dump_plane_state(plane_state); 12489 } 12490 } 12491 12492 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 12493 { 12494 struct drm_device *dev = state->base.dev; 12495 struct drm_connector *connector; 12496 struct drm_connector_list_iter conn_iter; 12497 unsigned int used_ports = 0; 12498 unsigned int used_mst_ports = 0; 12499 bool ret = true; 12500 12501 /* 12502 * We're going to peek into connector->state, 12503 * hence connection_mutex must be held. 12504 */ 12505 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); 12506 12507 /* 12508 * Walk the connector list instead of the encoder 12509 * list to detect the problem on ddi platforms 12510 * where there's just one encoder per digital port. 
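*
* e.g. if an SST DP connector and an HDMI connector both end up
* with their best_encoder on the same digital port, used_ports
* sees that port twice and the whole state is rejected.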
12511 */ 12512 drm_connector_list_iter_begin(dev, &conn_iter); 12513 drm_for_each_connector_iter(connector, &conn_iter) { 12514 struct drm_connector_state *connector_state; 12515 struct intel_encoder *encoder; 12516 12517 connector_state = 12518 drm_atomic_get_new_connector_state(&state->base, 12519 connector); 12520 if (!connector_state) 12521 connector_state = connector->state; 12522 12523 if (!connector_state->best_encoder) 12524 continue; 12525 12526 encoder = to_intel_encoder(connector_state->best_encoder); 12527 12528 WARN_ON(!connector_state->crtc); 12529 12530 switch (encoder->type) { 12531 unsigned int port_mask; 12532 case INTEL_OUTPUT_DDI: 12533 if (WARN_ON(!HAS_DDI(to_i915(dev)))) 12534 break; 12535 /* else, fall through */ 12536 case INTEL_OUTPUT_DP: 12537 case INTEL_OUTPUT_HDMI: 12538 case INTEL_OUTPUT_EDP: 12539 port_mask = 1 << encoder->port; 12540 12541 /* the same port mustn't appear more than once */ 12542 if (used_ports & port_mask) 12543 ret = false; 12544 12545 used_ports |= port_mask; 12546 break; 12547 case INTEL_OUTPUT_DP_MST: 12548 used_mst_ports |= 12549 1 << encoder->port; 12550 break; 12551 default: 12552 break; 12553 } 12554 } 12555 drm_connector_list_iter_end(&conn_iter); 12556 12557 /* can't mix MST and SST/HDMI on the same port */ 12558 if (used_ports & used_mst_ports) 12559 return false; 12560 12561 return ret; 12562 } 12563 12564 static int 12565 clear_intel_crtc_state(struct intel_crtc_state *crtc_state) 12566 { 12567 struct drm_i915_private *dev_priv = 12568 to_i915(crtc_state->base.crtc->dev); 12569 struct intel_crtc_state *saved_state; 12570 12571 saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL); 12572 if (!saved_state) 12573 return -ENOMEM; 12574 12575 /* FIXME: before the switch to atomic started, a new pipe_config was 12576 * kzalloc'd. Code that depends on any field being zero should be 12577 * fixed, so that the crtc_state can be safely duplicated. For now, 12578 * only fields that are known to not cause problems are preserved. */ 12579 12580 saved_state->scaler_state = crtc_state->scaler_state; 12581 saved_state->shared_dpll = crtc_state->shared_dpll; 12582 saved_state->dpll_hw_state = crtc_state->dpll_hw_state; 12583 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, 12584 sizeof(saved_state->icl_port_dplls)); 12585 saved_state->crc_enabled = crtc_state->crc_enabled; 12586 if (IS_G4X(dev_priv) || 12587 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 12588 saved_state->wm = crtc_state->wm; 12589 /* 12590 * Save the slave bitmask which gets filled for master crtc state during 12591 * the slave atomic check call.
12592 */ 12593 if (is_trans_port_sync_master(crtc_state)) 12594 saved_state->sync_mode_slaves_mask = 12595 crtc_state->sync_mode_slaves_mask; 12596 12597 /* Keep base drm_crtc_state intact, only clear our extended struct */ 12598 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base)); 12599 memcpy(&crtc_state->base + 1, &saved_state->base + 1, 12600 sizeof(*crtc_state) - sizeof(crtc_state->base)); 12601 12602 kfree(saved_state); 12603 return 0; 12604 } 12605 12606 static int 12607 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) 12608 { 12609 struct drm_crtc *crtc = pipe_config->base.crtc; 12610 struct drm_atomic_state *state = pipe_config->base.state; 12611 struct intel_encoder *encoder; 12612 struct drm_connector *connector; 12613 struct drm_connector_state *connector_state; 12614 int base_bpp, ret; 12615 int i; 12616 bool retry = true; 12617 12618 ret = clear_intel_crtc_state(pipe_config); 12619 if (ret) 12620 return ret; 12621 12622 pipe_config->cpu_transcoder = 12623 (enum transcoder) to_intel_crtc(crtc)->pipe; 12624 12625 /* 12626 * Sanitize sync polarity flags based on requested ones. If neither 12627 * positive nor negative polarity is requested, treat this as meaning 12628 * negative polarity. 12629 */ 12630 if (!(pipe_config->base.adjusted_mode.flags & 12631 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 12632 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 12633 12634 if (!(pipe_config->base.adjusted_mode.flags & 12635 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 12636 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 12637 12638 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc), 12639 pipe_config); 12640 if (ret) 12641 return ret; 12642 12643 base_bpp = pipe_config->pipe_bpp; 12644 12645 /* 12646 * Determine the real pipe dimensions. Note that stereo modes can 12647 * increase the actual pipe size due to the frame doubling and 12648 * insertion of additional space for blanks between the frames. This 12649 * is stored in the crtc timings. We use the requested mode to do this 12650 * computation to clearly distinguish it from the adjusted mode, which 12651 * can be changed by the connectors in the below retry loop. 12652 */ 12653 drm_mode_get_hv_timing(&pipe_config->base.mode, 12654 &pipe_config->pipe_src_w, 12655 &pipe_config->pipe_src_h); 12656 12657 for_each_new_connector_in_state(state, connector, connector_state, i) { 12658 if (connector_state->crtc != crtc) 12659 continue; 12660 12661 encoder = to_intel_encoder(connector_state->best_encoder); 12662 12663 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { 12664 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); 12665 return -EINVAL; 12666 } 12667 12668 /* 12669 * Determine output_types before calling the .compute_config() 12670 * hooks so that the hooks can use this information safely. 12671 */ 12672 if (encoder->compute_output_type) 12673 pipe_config->output_types |= 12674 BIT(encoder->compute_output_type(encoder, pipe_config, 12675 connector_state)); 12676 else 12677 pipe_config->output_types |= BIT(encoder->type); 12678 } 12679 12680 encoder_retry: 12681 /* Ensure the port clock defaults are reset when retrying. */ 12682 pipe_config->port_clock = 0; 12683 pipe_config->pixel_multiplier = 1; 12684 12685 /* Fill in default crtc timings, allow encoders to overwrite them.
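* (drm_mode_set_crtcinfo() below derives the crtc_* timing fields
* from the adjusted mode; CRTC_STEREO_DOUBLE asks it to double the
* timings for frame-packed stereo modes.)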
*/ 12686 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode, 12687 CRTC_STEREO_DOUBLE); 12688 12689 /* Set the crtc_state defaults for trans_port_sync */ 12690 pipe_config->master_transcoder = INVALID_TRANSCODER; 12691 ret = icl_add_sync_mode_crtcs(pipe_config); 12692 if (ret) { 12693 DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n", 12694 ret); 12695 return ret; 12696 } 12697 12698 /* Pass our mode to the connectors and the CRTC to give them a chance to 12699 * adjust it according to limitations or connector properties, and also 12700 * a chance to reject the mode entirely. 12701 */ 12702 for_each_new_connector_in_state(state, connector, connector_state, i) { 12703 if (connector_state->crtc != crtc) 12704 continue; 12705 12706 encoder = to_intel_encoder(connector_state->best_encoder); 12707 ret = encoder->compute_config(encoder, pipe_config, 12708 connector_state); 12709 if (ret < 0) { 12710 if (ret != -EDEADLK) 12711 DRM_DEBUG_KMS("Encoder config failure: %d\n", 12712 ret); 12713 return ret; 12714 } 12715 } 12716 12717 /* Set default port clock if not overwritten by the encoder. Needs to be 12718 * done afterwards in case the encoder adjusts the mode. */ 12719 if (!pipe_config->port_clock) 12720 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock 12721 * pipe_config->pixel_multiplier; 12722 12723 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 12724 if (ret == -EDEADLK) 12725 return ret; 12726 if (ret < 0) { 12727 DRM_DEBUG_KMS("CRTC fixup failed\n"); 12728 return ret; 12729 } 12730 12731 if (ret == RETRY) { 12732 if (WARN(!retry, "loop in pipe configuration computation\n")) 12733 return -EINVAL; 12734 12735 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); 12736 retry = false; 12737 goto encoder_retry; 12738 } 12739 12740 /* Dithering seems to not pass through bits correctly when it should, so 12741 * only enable it on 6bpc panels and when it's not a compliance 12742 * test requesting 6bpc video pattern.
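*
* e.g. a 6bpc (18bpp) eDP panel fed with 8bpc content gets
* dithering enabled here, while 8bpc and deeper panels do not.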
12743 */ 12744 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) && 12745 !pipe_config->dither_force_disable; 12746 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 12747 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 12748 12749 return 0; 12750 } 12751 12752 bool intel_fuzzy_clock_check(int clock1, int clock2) 12753 { 12754 int diff; 12755 12756 if (clock1 == clock2) 12757 return true; 12758 12759 if (!clock1 || !clock2) 12760 return false; 12761 12762 diff = abs(clock1 - clock2); 12763 12764 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 12765 return true; 12766 12767 return false; 12768 } 12769 12770 static bool 12771 intel_compare_m_n(unsigned int m, unsigned int n, 12772 unsigned int m2, unsigned int n2, 12773 bool exact) 12774 { 12775 if (m == m2 && n == n2) 12776 return true; 12777 12778 if (exact || !m || !n || !m2 || !n2) 12779 return false; 12780 12781 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 12782 12783 if (n > n2) { 12784 while (n > n2) { 12785 m2 <<= 1; 12786 n2 <<= 1; 12787 } 12788 } else if (n < n2) { 12789 while (n < n2) { 12790 m <<= 1; 12791 n <<= 1; 12792 } 12793 } 12794 12795 if (n != n2) 12796 return false; 12797 12798 return intel_fuzzy_clock_check(m, m2); 12799 } 12800 12801 static bool 12802 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 12803 const struct intel_link_m_n *m2_n2, 12804 bool exact) 12805 { 12806 return m_n->tu == m2_n2->tu && 12807 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, 12808 m2_n2->gmch_m, m2_n2->gmch_n, exact) && 12809 intel_compare_m_n(m_n->link_m, m_n->link_n, 12810 m2_n2->link_m, m2_n2->link_n, exact); 12811 } 12812 12813 static bool 12814 intel_compare_infoframe(const union hdmi_infoframe *a, 12815 const union hdmi_infoframe *b) 12816 { 12817 return memcmp(a, b, sizeof(*a)) == 0; 12818 } 12819 12820 static void 12821 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, 12822 bool fastset, const char *name, 12823 const union hdmi_infoframe *a, 12824 const union hdmi_infoframe *b) 12825 { 12826 if (fastset) { 12827 if (!drm_debug_enabled(DRM_UT_KMS)) 12828 return; 12829 12830 DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name); 12831 DRM_DEBUG_KMS("expected:\n"); 12832 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); 12833 DRM_DEBUG_KMS("found:\n"); 12834 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); 12835 } else { 12836 DRM_ERROR("mismatch in %s infoframe\n", name); 12837 DRM_ERROR("expected:\n"); 12838 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); 12839 DRM_ERROR("found:\n"); 12840 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); 12841 } 12842 } 12843 12844 static void __printf(4, 5) 12845 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, 12846 const char *name, const char *format, ...) 
12847 { 12848 struct va_format vaf; 12849 va_list args; 12850 12851 va_start(args, format); 12852 vaf.fmt = format; 12853 vaf.va = &args; 12854 12855 if (fastset) 12856 DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n", 12857 crtc->base.base.id, crtc->base.name, name, &vaf); 12858 else 12859 DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n", 12860 crtc->base.base.id, crtc->base.name, name, &vaf); 12861 12862 va_end(args); 12863 } 12864 12865 static bool fastboot_enabled(struct drm_i915_private *dev_priv) 12866 { 12867 if (i915_modparams.fastboot != -1) 12868 return i915_modparams.fastboot; 12869 12870 /* Enable fastboot by default on Skylake and newer */ 12871 if (INTEL_GEN(dev_priv) >= 9) 12872 return true; 12873 12874 /* Enable fastboot by default on VLV and CHV */ 12875 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 12876 return true; 12877 12878 /* Disabled by default on all others */ 12879 return false; 12880 } 12881 12882 static bool 12883 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 12884 const struct intel_crtc_state *pipe_config, 12885 bool fastset) 12886 { 12887 struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev); 12888 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); 12889 bool ret = true; 12890 u32 bp_gamma = 0; 12891 bool fixup_inherited = fastset && 12892 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) && 12893 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED); 12894 12895 if (fixup_inherited && !fastboot_enabled(dev_priv)) { 12896 DRM_DEBUG_KMS("initial modeset and fastboot not set\n"); 12897 ret = false; 12898 } 12899 12900 #define PIPE_CONF_CHECK_X(name) do { \ 12901 if (current_config->name != pipe_config->name) { \ 12902 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 12903 "(expected 0x%08x, found 0x%08x)", \ 12904 current_config->name, \ 12905 pipe_config->name); \ 12906 ret = false; \ 12907 } \ 12908 } while (0) 12909 12910 #define PIPE_CONF_CHECK_I(name) do { \ 12911 if (current_config->name != pipe_config->name) { \ 12912 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 12913 "(expected %i, found %i)", \ 12914 current_config->name, \ 12915 pipe_config->name); \ 12916 ret = false; \ 12917 } \ 12918 } while (0) 12919 12920 #define PIPE_CONF_CHECK_BOOL(name) do { \ 12921 if (current_config->name != pipe_config->name) { \ 12922 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 12923 "(expected %s, found %s)", \ 12924 yesno(current_config->name), \ 12925 yesno(pipe_config->name)); \ 12926 ret = false; \ 12927 } \ 12928 } while (0) 12929 12930 /* 12931 * Checks state where we only read out the enabling, but not the entire 12932 * state itself (like full infoframes or ELD for audio). These states 12933 * require a full modeset on bootup to fix up. 
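 * For example, has_audio: we read out only whether audio is enabled, not
 * the ELD itself, so on an inherited (BIOS-programmed) state only an
 * off/off sw/hw pair can be verified exactly; any other combination makes
 * PIPE_CONF_CHECK_BOOL_INCOMPLETE() below fail the compare and force a
 * full modeset.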
12934 */ 12935 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \ 12936 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \ 12937 PIPE_CONF_CHECK_BOOL(name); \ 12938 } else { \ 12939 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 12940 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \ 12941 yesno(current_config->name), \ 12942 yesno(pipe_config->name)); \ 12943 ret = false; \ 12944 } \ 12945 } while (0) 12946 12947 #define PIPE_CONF_CHECK_P(name) do { \ 12948 if (current_config->name != pipe_config->name) { \ 12949 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 12950 "(expected %p, found %p)", \ 12951 current_config->name, \ 12952 pipe_config->name); \ 12953 ret = false; \ 12954 } \ 12955 } while (0) 12956 12957 #define PIPE_CONF_CHECK_M_N(name) do { \ 12958 if (!intel_compare_link_m_n(&current_config->name, \ 12959 &pipe_config->name,\ 12960 !fastset)) { \ 12961 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 12962 "(expected tu %i gmch %i/%i link %i/%i, " \ 12963 "found tu %i, gmch %i/%i link %i/%i)", \ 12964 current_config->name.tu, \ 12965 current_config->name.gmch_m, \ 12966 current_config->name.gmch_n, \ 12967 current_config->name.link_m, \ 12968 current_config->name.link_n, \ 12969 pipe_config->name.tu, \ 12970 pipe_config->name.gmch_m, \ 12971 pipe_config->name.gmch_n, \ 12972 pipe_config->name.link_m, \ 12973 pipe_config->name.link_n); \ 12974 ret = false; \ 12975 } \ 12976 } while (0) 12977 12978 /* This is required for BDW+ where there is only one set of registers for 12979 * switching between high and low RR. 12980 * This macro can be used whenever a comparison has to be made between one 12981 * hw state and multiple sw state variables. 12982 */ 12983 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \ 12984 if (!intel_compare_link_m_n(&current_config->name, \ 12985 &pipe_config->name, !fastset) && \ 12986 !intel_compare_link_m_n(&current_config->alt_name, \ 12987 &pipe_config->name, !fastset)) { \ 12988 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 12989 "(expected tu %i gmch %i/%i link %i/%i, " \ 12990 "or tu %i gmch %i/%i link %i/%i, " \ 12991 "found tu %i, gmch %i/%i link %i/%i)", \ 12992 current_config->name.tu, \ 12993 current_config->name.gmch_m, \ 12994 current_config->name.gmch_n, \ 12995 current_config->name.link_m, \ 12996 current_config->name.link_n, \ 12997 current_config->alt_name.tu, \ 12998 current_config->alt_name.gmch_m, \ 12999 current_config->alt_name.gmch_n, \ 13000 current_config->alt_name.link_m, \ 13001 current_config->alt_name.link_n, \ 13002 pipe_config->name.tu, \ 13003 pipe_config->name.gmch_m, \ 13004 pipe_config->name.gmch_n, \ 13005 pipe_config->name.link_m, \ 13006 pipe_config->name.link_n); \ 13007 ret = false; \ 13008 } \ 13009 } while (0) 13010 13011 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ 13012 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 13013 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13014 "(%x) (expected %i, found %i)", \ 13015 (mask), \ 13016 current_config->name & (mask), \ 13017 pipe_config->name & (mask)); \ 13018 ret = false; \ 13019 } \ 13020 } while (0) 13021 13022 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \ 13023 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 13024 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13025 "(expected %i, found %i)", \ 13026 current_config->name, \ 13027 pipe_config->name); \ 13028 ret = false; \ 13029 } \ 13030 } while (0) 13031
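/*
 * Illustrative expansion (no extra checks are added here):
 * PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock) above becomes roughly
 *
 *	if (!intel_fuzzy_clock_check(current_config->port_clock,
 *				     pipe_config->port_clock)) {
 *		pipe_config_mismatch(fastset, crtc, "port_clock",
 *				     "(expected %i, found %i)", ...);
 *		ret = false;
 *	}
 *
 * where intel_fuzzy_clock_check() treats two clocks as equal when they
 * differ by less than ~5% of their sum, e.g. 100000 vs 104000 kHz passes
 * while 100000 vs 111000 kHz fails.
 */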
13032 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ 13033 if (!intel_compare_infoframe(&current_config->infoframes.name, \ 13034 &pipe_config->infoframes.name)) { \ 13035 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \ 13036 &current_config->infoframes.name, \ 13037 &pipe_config->infoframes.name); \ 13038 ret = false; \ 13039 } \ 13040 } while (0) 13041 13042 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \ 13043 if (current_config->name1 != pipe_config->name1) { \ 13044 pipe_config_mismatch(fastset, crtc, __stringify(name1), \ 13045 "(expected %i, found %i, won't compare lut values)", \ 13046 current_config->name1, \ 13047 pipe_config->name1); \ 13048 ret = false;\ 13049 } else { \ 13050 if (!intel_color_lut_equal(current_config->name2, \ 13051 pipe_config->name2, pipe_config->name1, \ 13052 bit_precision)) { \ 13053 pipe_config_mismatch(fastset, crtc, __stringify(name2), \ 13054 "hw_state doesn't match sw_state"); \ 13055 ret = false; \ 13056 } \ 13057 } \ 13058 } while (0) 13059 13060 #define PIPE_CONF_QUIRK(quirk) \ 13061 ((current_config->quirks | pipe_config->quirks) & (quirk)) 13062 13063 PIPE_CONF_CHECK_I(cpu_transcoder); 13064 13065 PIPE_CONF_CHECK_BOOL(has_pch_encoder); 13066 PIPE_CONF_CHECK_I(fdi_lanes); 13067 PIPE_CONF_CHECK_M_N(fdi_m_n); 13068 13069 PIPE_CONF_CHECK_I(lane_count); 13070 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 13071 13072 if (INTEL_GEN(dev_priv) < 8) { 13073 PIPE_CONF_CHECK_M_N(dp_m_n); 13074 13075 if (current_config->has_drrs) 13076 PIPE_CONF_CHECK_M_N(dp_m2_n2); 13077 } else 13078 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 13079 13080 PIPE_CONF_CHECK_X(output_types); 13081 13082 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); 13083 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); 13084 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start); 13085 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end); 13086 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start); 13087 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end); 13088 13089 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay); 13090 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal); 13091 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start); 13092 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end); 13093 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start); 13094 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); 13095 13096 PIPE_CONF_CHECK_I(pixel_multiplier); 13097 PIPE_CONF_CHECK_I(output_format); 13098 PIPE_CONF_CHECK_I(dc3co_exitline); 13099 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 13100 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 13101 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13102 PIPE_CONF_CHECK_BOOL(limited_color_range); 13103 13104 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 13105 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 13106 PIPE_CONF_CHECK_BOOL(has_infoframe); 13107 PIPE_CONF_CHECK_BOOL(fec_enable); 13108 13109 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); 13110 13111 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 13112 DRM_MODE_FLAG_INTERLACE); 13113 13114 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 13115 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 13116 DRM_MODE_FLAG_PHSYNC); 13117 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 13118 DRM_MODE_FLAG_NHSYNC); 13119 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 13120 DRM_MODE_FLAG_PVSYNC); 13121 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 13122 DRM_MODE_FLAG_NVSYNC); 13123 } 13124 13125 PIPE_CONF_CHECK_X(gmch_pfit.control); 13126 /* pfit
ratios are autocomputed by the hw on gen4+ */ 13127 if (INTEL_GEN(dev_priv) < 4) 13128 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 13129 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 13130 13131 /* 13132 * Changing the EDP transcoder input mux 13133 * (A_ONOFF vs. A_ON) requires a full modeset. 13134 */ 13135 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 13136 13137 if (!fastset) { 13138 PIPE_CONF_CHECK_I(pipe_src_w); 13139 PIPE_CONF_CHECK_I(pipe_src_h); 13140 13141 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 13142 if (current_config->pch_pfit.enabled) { 13143 PIPE_CONF_CHECK_X(pch_pfit.pos); 13144 PIPE_CONF_CHECK_X(pch_pfit.size); 13145 } 13146 13147 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 13148 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 13149 13150 PIPE_CONF_CHECK_X(gamma_mode); 13151 if (IS_CHERRYVIEW(dev_priv)) 13152 PIPE_CONF_CHECK_X(cgm_mode); 13153 else 13154 PIPE_CONF_CHECK_X(csc_mode); 13155 PIPE_CONF_CHECK_BOOL(gamma_enable); 13156 PIPE_CONF_CHECK_BOOL(csc_enable); 13157 13158 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config); 13159 if (bp_gamma) 13160 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, base.gamma_lut, bp_gamma); 13161 13162 } 13163 13164 PIPE_CONF_CHECK_BOOL(double_wide); 13165 13166 PIPE_CONF_CHECK_P(shared_dpll); 13167 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 13168 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 13169 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 13170 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 13171 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 13172 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 13173 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 13174 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 13175 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 13176 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 13177 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 13178 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 13179 PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 13180 PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 13181 PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 13182 PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 13183 PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 13184 PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 13185 PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 13186 PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 13187 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 13188 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); 13189 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); 13190 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); 13191 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); 13192 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); 13193 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); 13194 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); 13195 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); 13196 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); 13197 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 13198 13199 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 13200 PIPE_CONF_CHECK_X(dsi_pll.div); 13201 13202 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) 13203 PIPE_CONF_CHECK_I(pipe_bpp); 13204 13205 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock); 13206 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 13207 13208 PIPE_CONF_CHECK_I(min_voltage_level); 13209 13210 PIPE_CONF_CHECK_X(infoframes.enable); 13211 PIPE_CONF_CHECK_X(infoframes.gcp); 13212 PIPE_CONF_CHECK_INFOFRAME(avi); 13213 PIPE_CONF_CHECK_INFOFRAME(spd); 13214 PIPE_CONF_CHECK_INFOFRAME(hdmi); 13215 PIPE_CONF_CHECK_INFOFRAME(drm); 13216 13217 PIPE_CONF_CHECK_I(sync_mode_slaves_mask); 13218 PIPE_CONF_CHECK_I(master_transcoder); 13219 13220 #undef PIPE_CONF_CHECK_X 13221 #undef PIPE_CONF_CHECK_I 13222 #undef 
PIPE_CONF_CHECK_BOOL 13223 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 13224 #undef PIPE_CONF_CHECK_P 13225 #undef PIPE_CONF_CHECK_FLAGS 13226 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 13227 #undef PIPE_CONF_CHECK_COLOR_LUT 13228 #undef PIPE_CONF_QUIRK 13229 13230 return ret; 13231 } 13232 13233 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 13234 const struct intel_crtc_state *pipe_config) 13235 { 13236 if (pipe_config->has_pch_encoder) { 13237 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 13238 &pipe_config->fdi_m_n); 13239 int dotclock = pipe_config->base.adjusted_mode.crtc_clock; 13240 13241 /* 13242 * FDI already provided one idea for the dotclock. 13243 * Yell if the encoder disagrees. 13244 */ 13245 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock), 13246 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 13247 fdi_dotclock, dotclock); 13248 } 13249 } 13250 13251 static void verify_wm_state(struct intel_crtc *crtc, 13252 struct intel_crtc_state *new_crtc_state) 13253 { 13254 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13255 struct skl_hw_state { 13256 struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; 13257 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; 13258 struct skl_ddb_allocation ddb; 13259 struct skl_pipe_wm wm; 13260 } *hw; 13261 struct skl_ddb_allocation *sw_ddb; 13262 struct skl_pipe_wm *sw_wm; 13263 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; 13264 const enum pipe pipe = crtc->pipe; 13265 int plane, level, max_level = ilk_wm_max_level(dev_priv); 13266 13267 if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active) 13268 return; 13269 13270 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 13271 if (!hw) 13272 return; 13273 13274 skl_pipe_wm_get_hw_state(crtc, &hw->wm); 13275 sw_wm = &new_crtc_state->wm.skl.optimal; 13276 13277 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv); 13278 13279 skl_ddb_get_hw_state(dev_priv, &hw->ddb); 13280 sw_ddb = &dev_priv->wm.skl_hw.ddb; 13281 13282 if (INTEL_GEN(dev_priv) >= 11 && 13283 hw->ddb.enabled_slices != sw_ddb->enabled_slices) 13284 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n", 13285 sw_ddb->enabled_slices, 13286 hw->ddb.enabled_slices); 13287 13288 /* planes */ 13289 for_each_universal_plane(dev_priv, pipe, plane) { 13290 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 13291 13292 hw_plane_wm = &hw->wm.planes[plane]; 13293 sw_plane_wm = &sw_wm->planes[plane]; 13294 13295 /* Watermarks */ 13296 for (level = 0; level <= max_level; level++) { 13297 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 13298 &sw_plane_wm->wm[level])) 13299 continue; 13300 13301 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13302 pipe_name(pipe), plane + 1, level, 13303 sw_plane_wm->wm[level].plane_en, 13304 sw_plane_wm->wm[level].plane_res_b, 13305 sw_plane_wm->wm[level].plane_res_l, 13306 hw_plane_wm->wm[level].plane_en, 13307 hw_plane_wm->wm[level].plane_res_b, 13308 hw_plane_wm->wm[level].plane_res_l); 13309 } 13310 13311 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 13312 &sw_plane_wm->trans_wm)) { 13313 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13314 pipe_name(pipe), plane + 1, 13315 sw_plane_wm->trans_wm.plane_en, 13316 sw_plane_wm->trans_wm.plane_res_b, 13317 sw_plane_wm->trans_wm.plane_res_l, 13318 hw_plane_wm->trans_wm.plane_en, 13319 hw_plane_wm->trans_wm.plane_res_b, 13320 hw_plane_wm->trans_wm.plane_res_l); 13321 } 
13322 13323 /* DDB */ 13324 hw_ddb_entry = &hw->ddb_y[plane]; 13325 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane]; 13326 13327 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 13328 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", 13329 pipe_name(pipe), plane + 1, 13330 sw_ddb_entry->start, sw_ddb_entry->end, 13331 hw_ddb_entry->start, hw_ddb_entry->end); 13332 } 13333 } 13334 13335 /* 13336 * cursor 13337 * If the cursor plane isn't active, we may not have updated its ddb 13338 * allocation. In that case, since the ddb allocation will be updated 13339 * once the plane becomes visible, we can skip this check. 13340 */ 13341 if (1) { 13342 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 13343 13344 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR]; 13345 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; 13346 13347 /* Watermarks */ 13348 for (level = 0; level <= max_level; level++) { 13349 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 13350 &sw_plane_wm->wm[level])) 13351 continue; 13352 13353 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13354 pipe_name(pipe), level, 13355 sw_plane_wm->wm[level].plane_en, 13356 sw_plane_wm->wm[level].plane_res_b, 13357 sw_plane_wm->wm[level].plane_res_l, 13358 hw_plane_wm->wm[level].plane_en, 13359 hw_plane_wm->wm[level].plane_res_b, 13360 hw_plane_wm->wm[level].plane_res_l); 13361 } 13362 13363 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 13364 &sw_plane_wm->trans_wm)) { 13365 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13366 pipe_name(pipe), 13367 sw_plane_wm->trans_wm.plane_en, 13368 sw_plane_wm->trans_wm.plane_res_b, 13369 sw_plane_wm->trans_wm.plane_res_l, 13370 hw_plane_wm->trans_wm.plane_en, 13371 hw_plane_wm->trans_wm.plane_res_b, 13372 hw_plane_wm->trans_wm.plane_res_l); 13373 } 13374 13375 /* DDB */ 13376 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR]; 13377 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; 13378 13379 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 13380 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", 13381 pipe_name(pipe), 13382 sw_ddb_entry->start, sw_ddb_entry->end, 13383 hw_ddb_entry->start, hw_ddb_entry->end); 13384 } 13385 } 13386 13387 kfree(hw); 13388 } 13389 13390 static void 13391 verify_connector_state(struct intel_atomic_state *state, 13392 struct intel_crtc *crtc) 13393 { 13394 struct drm_connector *connector; 13395 struct drm_connector_state *new_conn_state; 13396 int i; 13397 13398 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { 13399 struct drm_encoder *encoder = connector->encoder; 13400 struct intel_crtc_state *crtc_state = NULL; 13401 13402 if (new_conn_state->crtc != &crtc->base) 13403 continue; 13404 13405 if (crtc) 13406 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 13407 13408 intel_connector_verify_state(crtc_state, new_conn_state); 13409 13410 I915_STATE_WARN(new_conn_state->best_encoder != encoder, 13411 "connector's atomic encoder doesn't match legacy encoder\n"); 13412 } 13413 } 13414 13415 static void 13416 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) 13417 { 13418 struct intel_encoder *encoder; 13419 struct drm_connector *connector; 13420 struct drm_connector_state *old_conn_state, *new_conn_state; 13421 int i; 13422 13423 for_each_intel_encoder(&dev_priv->drm, encoder) { 13424 bool enabled = false,
found = false; 13425 enum pipe pipe; 13426 13427 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 13428 encoder->base.base.id, 13429 encoder->base.name); 13430 13431 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, 13432 new_conn_state, i) { 13433 if (old_conn_state->best_encoder == &encoder->base) 13434 found = true; 13435 13436 if (new_conn_state->best_encoder != &encoder->base) 13437 continue; 13438 found = enabled = true; 13439 13440 I915_STATE_WARN(new_conn_state->crtc != 13441 encoder->base.crtc, 13442 "connector's crtc doesn't match encoder crtc\n"); 13443 } 13444 13445 if (!found) 13446 continue; 13447 13448 I915_STATE_WARN(!!encoder->base.crtc != enabled, 13449 "encoder's enabled state mismatch " 13450 "(expected %i, found %i)\n", 13451 !!encoder->base.crtc, enabled); 13452 13453 if (!encoder->base.crtc) { 13454 bool active; 13455 13456 active = encoder->get_hw_state(encoder, &pipe); 13457 I915_STATE_WARN(active, 13458 "encoder detached but still enabled on pipe %c.\n", 13459 pipe_name(pipe)); 13460 } 13461 } 13462 } 13463 13464 static void 13465 verify_crtc_state(struct intel_crtc *crtc, 13466 struct intel_crtc_state *old_crtc_state, 13467 struct intel_crtc_state *new_crtc_state) 13468 { 13469 struct drm_device *dev = crtc->base.dev; 13470 struct drm_i915_private *dev_priv = to_i915(dev); 13471 struct intel_encoder *encoder; 13472 struct intel_crtc_state *pipe_config; 13473 struct drm_atomic_state *state; 13474 bool active; 13475 13476 state = old_crtc_state->base.state; 13477 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base); 13478 pipe_config = old_crtc_state; 13479 memset(pipe_config, 0, sizeof(*pipe_config)); 13480 pipe_config->base.crtc = &crtc->base; 13481 pipe_config->base.state = state; 13482 13483 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); 13484 13485 active = dev_priv->display.get_pipe_config(crtc, pipe_config); 13486 13487 /* we keep both pipes enabled on 830 */ 13488 if (IS_I830(dev_priv)) 13489 active = new_crtc_state->base.active; 13490 13491 I915_STATE_WARN(new_crtc_state->base.active != active, 13492 "crtc active state doesn't match with hw state " 13493 "(expected %i, found %i)\n", new_crtc_state->base.active, active); 13494 13495 I915_STATE_WARN(crtc->active != new_crtc_state->base.active, 13496 "transitional active state does not match atomic hw state " 13497 "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active); 13498 13499 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 13500 enum pipe pipe; 13501 13502 active = encoder->get_hw_state(encoder, &pipe); 13503 I915_STATE_WARN(active != new_crtc_state->base.active, 13504 "[ENCODER:%i] active %i with crtc active %i\n", 13505 encoder->base.base.id, active, new_crtc_state->base.active); 13506 13507 I915_STATE_WARN(active && crtc->pipe != pipe, 13508 "Encoder connected to wrong pipe %c\n", 13509 pipe_name(pipe)); 13510 13511 if (active) 13512 encoder->get_config(encoder, pipe_config); 13513 } 13514 13515 intel_crtc_compute_pixel_rate(pipe_config); 13516 13517 if (!new_crtc_state->base.active) 13518 return; 13519 13520 intel_pipe_config_sanity_check(dev_priv, pipe_config); 13521 13522 if (!intel_pipe_config_compare(new_crtc_state, 13523 pipe_config, false)) { 13524 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 13525 intel_dump_pipe_config(pipe_config, NULL, "[hw state]"); 13526 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]"); 13527 } 13528 } 13529 13530 static void 13531 intel_verify_planes(struct intel_atomic_state *state) 
13532 { 13533 struct intel_plane *plane; 13534 const struct intel_plane_state *plane_state; 13535 int i; 13536 13537 for_each_new_intel_plane_in_state(state, plane, 13538 plane_state, i) 13539 assert_plane(plane, plane_state->planar_slave || 13540 plane_state->base.visible); 13541 } 13542 13543 static void 13544 verify_single_dpll_state(struct drm_i915_private *dev_priv, 13545 struct intel_shared_dpll *pll, 13546 struct intel_crtc *crtc, 13547 struct intel_crtc_state *new_crtc_state) 13548 { 13549 struct intel_dpll_hw_state dpll_hw_state; 13550 unsigned int crtc_mask; 13551 bool active; 13552 13553 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 13554 13555 DRM_DEBUG_KMS("%s\n", pll->info->name); 13556 13557 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state); 13558 13559 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { 13560 I915_STATE_WARN(!pll->on && pll->active_mask, 13561 "pll in active use but not on in sw tracking\n"); 13562 I915_STATE_WARN(pll->on && !pll->active_mask, 13563 "pll is on but not used by any active crtc\n"); 13564 I915_STATE_WARN(pll->on != active, 13565 "pll on state mismatch (expected %i, found %i)\n", 13566 pll->on, active); 13567 } 13568 13569 if (!crtc) { 13570 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask, 13571 "more active pll users than references: %x vs %x\n", 13572 pll->active_mask, pll->state.crtc_mask); 13573 13574 return; 13575 } 13576 13577 crtc_mask = drm_crtc_mask(&crtc->base); 13578 13579 if (new_crtc_state->base.active) 13580 I915_STATE_WARN(!(pll->active_mask & crtc_mask), 13581 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", 13582 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); 13583 else 13584 I915_STATE_WARN(pll->active_mask & crtc_mask, 13585 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", 13586 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); 13587 13588 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), 13589 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", 13590 crtc_mask, pll->state.crtc_mask); 13591 13592 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, 13593 &dpll_hw_state, 13594 sizeof(dpll_hw_state)), 13595 "pll hw state mismatch\n"); 13596 } 13597 13598 static void 13599 verify_shared_dpll_state(struct intel_crtc *crtc, 13600 struct intel_crtc_state *old_crtc_state, 13601 struct intel_crtc_state *new_crtc_state) 13602 { 13603 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13604 13605 if (new_crtc_state->shared_dpll) 13606 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state); 13607 13608 if (old_crtc_state->shared_dpll && 13609 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) { 13610 unsigned int crtc_mask = drm_crtc_mask(&crtc->base); 13611 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; 13612 13613 I915_STATE_WARN(pll->active_mask & crtc_mask, 13614 "pll active mismatch (didn't expect pipe %c in active mask)\n", 13615 pipe_name(drm_crtc_index(&crtc->base))); 13616 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, 13617 "pll enabled crtcs mismatch (found %c in enabled mask)\n", 13618 pipe_name(drm_crtc_index(&crtc->base))); 13619 } 13620 } 13621 13622 static void 13623 intel_modeset_verify_crtc(struct intel_crtc *crtc, 13624 struct intel_atomic_state *state, 13625 struct intel_crtc_state *old_crtc_state, 13626 struct intel_crtc_state *new_crtc_state) 13627 { 13628 if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) 13629 return; 13630
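	/*
	 * Cross-check the committed sw state against a fresh hw readback:
	 * watermarks/DDB, connector routing, the full crtc state and any
	 * shared DPLLs in use.
	 */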
13631 verify_wm_state(crtc, new_crtc_state); 13632 verify_connector_state(state, crtc); 13633 verify_crtc_state(crtc, old_crtc_state, new_crtc_state); 13634 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state); 13635 } 13636 13637 static void 13638 verify_disabled_dpll_state(struct drm_i915_private *dev_priv) 13639 { 13640 int i; 13641 13642 for (i = 0; i < dev_priv->num_shared_dpll; i++) 13643 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL); 13644 } 13645 13646 static void 13647 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv, 13648 struct intel_atomic_state *state) 13649 { 13650 verify_encoder_state(dev_priv, state); 13651 verify_connector_state(state, NULL); 13652 verify_disabled_dpll_state(dev_priv); 13653 } 13654 13655 static void 13656 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) 13657 { 13658 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 13659 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13660 const struct drm_display_mode *adjusted_mode = 13661 &crtc_state->base.adjusted_mode; 13662 13663 drm_calc_timestamping_constants(&crtc->base, adjusted_mode); 13664 13665 /* 13666 * The scanline counter increments at the leading edge of hsync. 13667 * 13668 * On most platforms it starts counting from vtotal-1 on the 13669 * first active line. That means the scanline counter value is 13670 * always one less than what we would expect. Ie. just after 13671 * start of vblank, which also occurs at start of hsync (on the 13672 * last active line), the scanline counter will read vblank_start-1. 13673 * 13674 * On gen2 the scanline counter starts counting from 1 instead 13675 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 13676 * to keep the value positive), instead of adding one. 13677 * 13678 * On HSW+ the behaviour of the scanline counter depends on the output 13679 * type. For DP ports it behaves like most other platforms, but on HDMI 13680 * there's an extra 1 line difference. So we need to add two instead of 13681 * one to the value. 13682 * 13683 * On VLV/CHV DSI the scanline counter would appear to increment 13684 * approx. 1/3 of a scanline before start of vblank. Unfortunately 13685 * that means we can't tell whether we're in vblank or not while 13686 * we're on that particular line. We must still set scanline_offset 13687 * to 1 so that the vblank timestamps come out correct when we query 13688 * the scanline counter from within the vblank interrupt handler. 13689 * However if queried just before the start of vblank we'll get an 13690 * answer that's slightly in the future. 
13691 */ 13692 if (IS_GEN(dev_priv, 2)) { 13693 int vtotal; 13694 13695 vtotal = adjusted_mode->crtc_vtotal; 13696 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 13697 vtotal /= 2; 13698 13699 crtc->scanline_offset = vtotal - 1; 13700 } else if (HAS_DDI(dev_priv) && 13701 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 13702 crtc->scanline_offset = 2; 13703 } else { 13704 crtc->scanline_offset = 1; 13705 } 13706 } 13707 13708 static void intel_modeset_clear_plls(struct intel_atomic_state *state) 13709 { 13710 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13711 struct intel_crtc_state *new_crtc_state; 13712 struct intel_crtc *crtc; 13713 int i; 13714 13715 if (!dev_priv->display.crtc_compute_clock) 13716 return; 13717 13718 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 13719 if (!needs_modeset(new_crtc_state)) 13720 continue; 13721 13722 intel_release_shared_dplls(state, crtc); 13723 } 13724 } 13725 13726 /* 13727 * This implements the workaround described in the "notes" section of the mode 13728 * set sequence documentation. When going from no pipes or single pipe to 13729 * multiple pipes, and planes are enabled after the pipe, we need to wait at 13730 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 13731 */ 13732 static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) 13733 { 13734 struct intel_crtc_state *crtc_state; 13735 struct intel_crtc *crtc; 13736 struct intel_crtc_state *first_crtc_state = NULL; 13737 struct intel_crtc_state *other_crtc_state = NULL; 13738 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 13739 int i; 13740 13741 /* look at all crtc's that are going to be enabled during the modeset */ 13742 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 13743 if (!crtc_state->base.active || 13744 !needs_modeset(crtc_state)) 13745 continue; 13746 13747 if (first_crtc_state) { 13748 other_crtc_state = crtc_state; 13749 break; 13750 } else { 13751 first_crtc_state = crtc_state; 13752 first_pipe = crtc->pipe; 13753 } 13754 } 13755 13756 /* No workaround needed? */ 13757 if (!first_crtc_state) 13758 return 0; 13759 13760 /* w/a possibly needed, check how many crtc's are already enabled.
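 * If exactly one pipe is already running, the first newly enabled pipe
 * has to wait on it; if none are running, the second newly enabled pipe
 * waits on the first. With two or more pipes already enabled no
 * workaround is needed.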
*/ 13761 for_each_intel_crtc(state->base.dev, crtc) { 13762 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 13763 if (IS_ERR(crtc_state)) 13764 return PTR_ERR(crtc_state); 13765 13766 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 13767 13768 if (!crtc_state->base.active || 13769 needs_modeset(crtc_state)) 13770 continue; 13771 13772 /* 2 or more enabled crtcs means no need for w/a */ 13773 if (enabled_pipe != INVALID_PIPE) 13774 return 0; 13775 13776 enabled_pipe = crtc->pipe; 13777 } 13778 13779 if (enabled_pipe != INVALID_PIPE) 13780 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 13781 else if (other_crtc_state) 13782 other_crtc_state->hsw_workaround_pipe = first_pipe; 13783 13784 return 0; 13785 } 13786 13787 static int intel_modeset_checks(struct intel_atomic_state *state) 13788 { 13789 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13790 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13791 struct intel_crtc *crtc; 13792 int ret, i; 13793 13794 /* keep the current setting */ 13795 if (!state->cdclk.force_min_cdclk_changed) 13796 state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk; 13797 13798 state->modeset = true; 13799 state->active_pipes = dev_priv->active_pipes; 13800 state->cdclk.logical = dev_priv->cdclk.logical; 13801 state->cdclk.actual = dev_priv->cdclk.actual; 13802 13803 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13804 new_crtc_state, i) { 13805 if (new_crtc_state->base.active) 13806 state->active_pipes |= BIT(crtc->pipe); 13807 else 13808 state->active_pipes &= ~BIT(crtc->pipe); 13809 13810 if (old_crtc_state->base.active != new_crtc_state->base.active) 13811 state->active_pipe_changes |= BIT(crtc->pipe); 13812 } 13813 13814 if (state->active_pipe_changes) { 13815 ret = intel_atomic_lock_global_state(state); 13816 if (ret) 13817 return ret; 13818 } 13819 13820 ret = intel_modeset_calc_cdclk(state); 13821 if (ret) 13822 return ret; 13823 13824 intel_modeset_clear_plls(state); 13825 13826 if (IS_HASWELL(dev_priv)) 13827 return haswell_mode_set_planes_workaround(state); 13828 13829 return 0; 13830 } 13831 13832 /* 13833 * Handle calculation of various watermark data at the end of the atomic check 13834 * phase. The code here should be run after the per-crtc and per-plane 'check' 13835 * handlers to ensure that all derived state has been updated. 13836 */ 13837 static int calc_watermark_data(struct intel_atomic_state *state) 13838 { 13839 struct drm_device *dev = state->base.dev; 13840 struct drm_i915_private *dev_priv = to_i915(dev); 13841 13842 /* Is there platform-specific watermark information to calculate? */ 13843 if (dev_priv->display.compute_global_watermarks) 13844 return dev_priv->display.compute_global_watermarks(state); 13845 13846 return 0; 13847 } 13848 13849 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 13850 struct intel_crtc_state *new_crtc_state) 13851 { 13852 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 13853 return; 13854 13855 new_crtc_state->base.mode_changed = false; 13856 new_crtc_state->update_pipe = true; 13857 13858 /* 13859 * If we're not doing the full modeset we want to 13860 * keep the current M/N values as they may be 13861 * sufficiently different to the computed values 13862 * to cause problems. 
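 * (The fastset check compares M/N values only fuzzily, so the values the
 * hw is currently using may legitimately differ from the freshly
 * computed ones.)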
13863 * 13864 * FIXME: should really copy more fuzzy state here 13865 */ 13866 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n; 13867 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n; 13868 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2; 13869 new_crtc_state->has_drrs = old_crtc_state->has_drrs; 13870 } 13871 13872 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, 13873 struct intel_crtc *crtc, 13874 u8 plane_ids_mask) 13875 { 13876 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13877 struct intel_plane *plane; 13878 13879 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 13880 struct intel_plane_state *plane_state; 13881 13882 if ((plane_ids_mask & BIT(plane->id)) == 0) 13883 continue; 13884 13885 plane_state = intel_atomic_get_plane_state(state, plane); 13886 if (IS_ERR(plane_state)) 13887 return PTR_ERR(plane_state); 13888 } 13889 13890 return 0; 13891 } 13892 13893 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) 13894 { 13895 /* See {hsw,vlv,ivb}_plane_ratio() */ 13896 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || 13897 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || 13898 IS_IVYBRIDGE(dev_priv); 13899 } 13900 13901 static int intel_atomic_check_planes(struct intel_atomic_state *state, 13902 bool *need_modeset) 13903 { 13904 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13905 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13906 struct intel_plane_state *plane_state; 13907 struct intel_plane *plane; 13908 struct intel_crtc *crtc; 13909 int i, ret; 13910 13911 ret = icl_add_linked_planes(state); 13912 if (ret) 13913 return ret; 13914 13915 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 13916 ret = intel_plane_atomic_check(state, plane); 13917 if (ret) { 13918 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n", 13919 plane->base.base.id, plane->base.name); 13920 return ret; 13921 } 13922 } 13923 13924 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13925 new_crtc_state, i) { 13926 u8 old_active_planes, new_active_planes; 13927 13928 ret = icl_check_nv12_planes(new_crtc_state); 13929 if (ret) 13930 return ret; 13931 13932 /* 13933 * On some platforms the number of active planes affects 13934 * the planes' minimum cdclk calculation. Add such planes 13935 * to the state before we compute the minimum cdclk. 13936 */ 13937 if (!active_planes_affects_min_cdclk(dev_priv)) 13938 continue; 13939 13940 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 13941 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 13942 13943 if (hweight8(old_active_planes) == hweight8(new_active_planes)) 13944 continue; 13945 13946 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); 13947 if (ret) 13948 return ret; 13949 } 13950 13951 /* 13952 * active_planes bitmask has been updated, and potentially 13953 * affected planes are part of the state. We can now 13954 * compute the minimum cdclk for each plane. 
13955 */ 13956 for_each_new_intel_plane_in_state(state, plane, plane_state, i) 13957 *need_modeset |= intel_plane_calc_min_cdclk(state, plane); 13958 13959 return 0; 13960 } 13961 13962 static int intel_atomic_check_crtcs(struct intel_atomic_state *state) 13963 { 13964 struct intel_crtc_state *crtc_state; 13965 struct intel_crtc *crtc; 13966 int i; 13967 13968 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 13969 int ret = intel_crtc_atomic_check(state, crtc); 13970 if (ret) { 13971 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", 13972 crtc->base.base.id, crtc->base.name); 13973 return ret; 13974 } 13975 } 13976 13977 return 0; 13978 } 13979 13980 /** 13981 * intel_atomic_check - validate state object 13982 * @dev: drm device 13983 * @_state: state to validate 13984 */ 13985 static int intel_atomic_check(struct drm_device *dev, 13986 struct drm_atomic_state *_state) 13987 { 13988 struct drm_i915_private *dev_priv = to_i915(dev); 13989 struct intel_atomic_state *state = to_intel_atomic_state(_state); 13990 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13991 struct intel_crtc *crtc; 13992 int ret, i; 13993 bool any_ms = false; 13994 13995 /* Catch I915_MODE_FLAG_INHERITED */ 13996 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13997 new_crtc_state, i) { 13998 if (new_crtc_state->base.mode.private_flags != 13999 old_crtc_state->base.mode.private_flags) 14000 new_crtc_state->base.mode_changed = true; 14001 } 14002 14003 ret = drm_atomic_helper_check_modeset(dev, &state->base); 14004 if (ret) 14005 goto fail; 14006 14007 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14008 new_crtc_state, i) { 14009 if (!needs_modeset(new_crtc_state)) 14010 continue; 14011 14012 if (!new_crtc_state->base.enable) { 14013 any_ms = true; 14014 continue; 14015 } 14016 14017 ret = intel_modeset_pipe_config(new_crtc_state); 14018 if (ret) 14019 goto fail; 14020 14021 intel_crtc_check_fastset(old_crtc_state, new_crtc_state); 14022 14023 if (needs_modeset(new_crtc_state)) 14024 any_ms = true; 14025 } 14026 14027 if (any_ms && !check_digital_port_conflicts(state)) { 14028 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); 14029 ret = -EINVAL; 14030 goto fail; 14031 } 14032 14033 ret = drm_dp_mst_atomic_check(&state->base); 14034 if (ret) 14035 goto fail; 14036 14037 any_ms |= state->cdclk.force_min_cdclk_changed; 14038 14039 ret = intel_atomic_check_planes(state, &any_ms); 14040 if (ret) 14041 goto fail; 14042 14043 if (any_ms) { 14044 ret = intel_modeset_checks(state); 14045 if (ret) 14046 goto fail; 14047 } else { 14048 state->cdclk.logical = dev_priv->cdclk.logical; 14049 } 14050 14051 ret = intel_atomic_check_crtcs(state); 14052 if (ret) 14053 goto fail; 14054 14055 intel_fbc_choose_crtc(dev_priv, state); 14056 ret = calc_watermark_data(state); 14057 if (ret) 14058 goto fail; 14059 14060 ret = intel_bw_atomic_check(state); 14061 if (ret) 14062 goto fail; 14063 14064 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14065 new_crtc_state, i) { 14066 if (!needs_modeset(new_crtc_state) && 14067 !new_crtc_state->update_pipe) 14068 continue; 14069 14070 intel_dump_pipe_config(new_crtc_state, state, 14071 needs_modeset(new_crtc_state) ? 14072 "[modeset]" : "[fastset]"); 14073 } 14074 14075 return 0; 14076 14077 fail: 14078 if (ret == -EDEADLK) 14079 return ret; 14080 14081 /* 14082 * FIXME would probably be nice to know which crtc specifically 14083 * caused the failure, in cases where we can pinpoint it.
14084 */ 14085 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14086 new_crtc_state, i) 14087 intel_dump_pipe_config(new_crtc_state, state, "[failed]"); 14088 14089 return ret; 14090 } 14091 14092 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 14093 { 14094 return drm_atomic_helper_prepare_planes(state->base.dev, 14095 &state->base); 14096 } 14097 14098 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) 14099 { 14100 struct drm_device *dev = crtc->base.dev; 14101 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)]; 14102 14103 if (!vblank->max_vblank_count) 14104 return (u32)drm_crtc_accurate_vblank_count(&crtc->base); 14105 14106 return crtc->base.funcs->get_vblank_counter(&crtc->base); 14107 } 14108 14109 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 14110 struct intel_crtc_state *crtc_state) 14111 { 14112 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14113 14114 if (!IS_GEN(dev_priv, 2)) 14115 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 14116 14117 if (crtc_state->has_pch_encoder) { 14118 enum pipe pch_transcoder = 14119 intel_crtc_pch_transcoder(crtc); 14120 14121 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 14122 } 14123 } 14124 14125 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, 14126 const struct intel_crtc_state *new_crtc_state) 14127 { 14128 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 14129 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14130 14131 /* 14132 * Update pipe size and adjust fitter if needed: the reason for this is 14133 * that in compute_mode_changes we check the native mode (not the pfit 14134 * mode) to see if we can flip rather than do a full mode set. In the 14135 * fastboot case, we'll flip, but if we don't update the pipesrc and 14136 * pfit state, we'll end up with a big fb scanned out into the wrong 14137 * sized surface. 14138 */ 14139 intel_set_pipe_src_size(new_crtc_state); 14140 14141 /* on skylake this is done by detaching scalers */ 14142 if (INTEL_GEN(dev_priv) >= 9) { 14143 skl_detach_scalers(new_crtc_state); 14144 14145 if (new_crtc_state->pch_pfit.enabled) 14146 skylake_pfit_enable(new_crtc_state); 14147 } else if (HAS_PCH_SPLIT(dev_priv)) { 14148 if (new_crtc_state->pch_pfit.enabled) 14149 ironlake_pfit_enable(new_crtc_state); 14150 else if (old_crtc_state->pch_pfit.enabled) 14151 ironlake_pfit_disable(old_crtc_state); 14152 } 14153 14154 if (INTEL_GEN(dev_priv) >= 11) 14155 icl_set_pipe_chicken(crtc); 14156 } 14157 14158 static void commit_pipe_config(struct intel_atomic_state *state, 14159 struct intel_crtc_state *old_crtc_state, 14160 struct intel_crtc_state *new_crtc_state) 14161 { 14162 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14163 bool modeset = needs_modeset(new_crtc_state); 14164 14165 /* 14166 * During modesets pipe configuration was programmed as the 14167 * CRTC was enabled. 
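 * For fastsets we instead commit the remaining pipe updates here:
 * color management, scaler state, the pipe misc bits and, via
 * intel_pipe_fastset(), the pipe src size and panel fitter.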
14168 */ 14169 if (!modeset) { 14170 if (new_crtc_state->base.color_mgmt_changed || 14171 new_crtc_state->update_pipe) 14172 intel_color_commit(new_crtc_state); 14173 14174 if (INTEL_GEN(dev_priv) >= 9) 14175 skl_detach_scalers(new_crtc_state); 14176 14177 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 14178 bdw_set_pipemisc(new_crtc_state); 14179 14180 if (new_crtc_state->update_pipe) 14181 intel_pipe_fastset(old_crtc_state, new_crtc_state); 14182 } 14183 14184 if (dev_priv->display.atomic_update_watermarks) 14185 dev_priv->display.atomic_update_watermarks(state, 14186 new_crtc_state); 14187 } 14188 14189 static void intel_update_crtc(struct intel_crtc *crtc, 14190 struct intel_atomic_state *state, 14191 struct intel_crtc_state *old_crtc_state, 14192 struct intel_crtc_state *new_crtc_state) 14193 { 14194 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14195 bool modeset = needs_modeset(new_crtc_state); 14196 struct intel_plane_state *new_plane_state = 14197 intel_atomic_get_new_plane_state(state, 14198 to_intel_plane(crtc->base.primary)); 14199 14200 if (modeset) { 14201 intel_crtc_update_active_timings(new_crtc_state); 14202 14203 dev_priv->display.crtc_enable(new_crtc_state, state); 14204 14205 /* vblanks work again, re-enable pipe CRC. */ 14206 intel_crtc_enable_pipe_crc(crtc); 14207 } else { 14208 if (new_crtc_state->preload_luts && 14209 (new_crtc_state->base.color_mgmt_changed || 14210 new_crtc_state->update_pipe)) 14211 intel_color_load_luts(new_crtc_state); 14212 14213 intel_pre_plane_update(old_crtc_state, new_crtc_state); 14214 14215 if (new_crtc_state->update_pipe) 14216 intel_encoders_update_pipe(crtc, new_crtc_state, state); 14217 } 14218 14219 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) 14220 intel_fbc_disable(crtc); 14221 else if (new_plane_state) 14222 intel_fbc_enable(crtc, new_crtc_state, new_plane_state); 14223 14224 /* Perform vblank evasion around commit operation */ 14225 intel_pipe_update_start(new_crtc_state); 14226 14227 commit_pipe_config(state, old_crtc_state, new_crtc_state); 14228 14229 if (INTEL_GEN(dev_priv) >= 9) 14230 skl_update_planes_on_crtc(state, crtc); 14231 else 14232 i9xx_update_planes_on_crtc(state, crtc); 14233 14234 intel_pipe_update_end(new_crtc_state); 14235 14236 /* 14237 * We usually enable FIFO underrun interrupts as part of the 14238 * CRTC enable sequence during modesets. But when we inherit a 14239 * valid pipe configuration from the BIOS we need to take care 14240 * of enabling them on the CRTC's first fastset. 
14241 */ 14242 if (new_crtc_state->update_pipe && !modeset && 14243 old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED) 14244 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 14245 } 14246 14247 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state) 14248 { 14249 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->base.crtc->dev); 14250 enum transcoder slave_transcoder; 14251 14252 WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask)); 14253 14254 slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1; 14255 return intel_get_crtc_for_pipe(dev_priv, 14256 (enum pipe)slave_transcoder); 14257 } 14258 14259 static void intel_old_crtc_state_disables(struct intel_atomic_state *state, 14260 struct intel_crtc_state *old_crtc_state, 14261 struct intel_crtc_state *new_crtc_state, 14262 struct intel_crtc *crtc) 14263 { 14264 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14265 14266 intel_crtc_disable_planes(state, crtc); 14267 14268 /* 14269 * We need to disable pipe CRC before disabling the pipe, 14270 * or we race against vblank off. 14271 */ 14272 intel_crtc_disable_pipe_crc(crtc); 14273 14274 dev_priv->display.crtc_disable(old_crtc_state, state); 14275 crtc->active = false; 14276 intel_fbc_disable(crtc); 14277 intel_disable_shared_dpll(old_crtc_state); 14278 14279 /* 14280 * Underruns don't always raise interrupts, 14281 * so check manually. 14282 */ 14283 intel_check_cpu_fifo_underruns(dev_priv); 14284 intel_check_pch_fifo_underruns(dev_priv); 14285 14286 /* FIXME unify this for all platforms */ 14287 if (!new_crtc_state->base.active && 14288 !HAS_GMCH(dev_priv) && 14289 dev_priv->display.initial_watermarks) 14290 dev_priv->display.initial_watermarks(state, 14291 new_crtc_state); 14292 } 14293 14294 static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state, 14295 struct intel_crtc *crtc, 14296 struct intel_crtc_state *old_crtc_state, 14297 struct intel_crtc_state *new_crtc_state) 14298 { 14299 struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state); 14300 struct intel_crtc_state *new_slave_crtc_state = 14301 intel_atomic_get_new_crtc_state(state, slave_crtc); 14302 struct intel_crtc_state *old_slave_crtc_state = 14303 intel_atomic_get_old_crtc_state(state, slave_crtc); 14304 14305 WARN_ON(!slave_crtc || !new_slave_crtc_state || 14306 !old_slave_crtc_state); 14307 14308 /* Disable Slave first */ 14309 intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state); 14310 if (old_slave_crtc_state->base.active) 14311 intel_old_crtc_state_disables(state, 14312 old_slave_crtc_state, 14313 new_slave_crtc_state, 14314 slave_crtc); 14315 14316 /* Disable Master */ 14317 intel_pre_plane_update(old_crtc_state, new_crtc_state); 14318 if (old_crtc_state->base.active) 14319 intel_old_crtc_state_disables(state, 14320 old_crtc_state, 14321 new_crtc_state, 14322 crtc); 14323 } 14324 14325 static void intel_commit_modeset_disables(struct intel_atomic_state *state) 14326 { 14327 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 14328 struct intel_crtc *crtc; 14329 int i; 14330 14331 /* 14332 * Disable CRTC/pipes in reverse order because some features (MST in 14333 * TGL+) require a master/slave relationship between pipes, so we 14334 * should always pick the lowest pipe as master, as it will be enabled 14335 * first and disabled in reverse order, so that the master will be the 14336 * last one to be disabled.
14337 */ 14338 for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state, 14339 new_crtc_state, i) { 14340 if (!needs_modeset(new_crtc_state)) 14341 continue; 14342 14343 /* In case of Transcoder Port Sync, master and slave CRTCs can be 14344 * assigned in any order, so we need to make sure that 14345 * slave CRTCs are disabled first and the master CRTC last, since 14346 * slave vblanks are masked until the master's vblank. 14347 */ 14348 if (is_trans_port_sync_mode(new_crtc_state)) { 14349 if (is_trans_port_sync_master(new_crtc_state)) 14350 intel_trans_port_sync_modeset_disables(state, 14351 crtc, 14352 old_crtc_state, 14353 new_crtc_state); 14354 else 14355 continue; 14356 } else { 14357 intel_pre_plane_update(old_crtc_state, new_crtc_state); 14358 14359 if (old_crtc_state->base.active) 14360 intel_old_crtc_state_disables(state, 14361 old_crtc_state, 14362 new_crtc_state, 14363 crtc); 14364 } 14365 } 14366 } 14367 14368 static void intel_commit_modeset_enables(struct intel_atomic_state *state) 14369 { 14370 struct intel_crtc *crtc; 14371 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 14372 int i; 14373 14374 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 14375 if (!new_crtc_state->base.active) 14376 continue; 14377 14378 intel_update_crtc(crtc, state, old_crtc_state, 14379 new_crtc_state); 14380 } 14381 } 14382 14383 static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc, 14384 struct intel_atomic_state *state, 14385 struct intel_crtc_state *new_crtc_state) 14386 { 14387 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14388 14389 intel_crtc_update_active_timings(new_crtc_state); 14390 dev_priv->display.crtc_enable(new_crtc_state, state); 14391 intel_crtc_enable_pipe_crc(crtc); 14392 } 14393 14394 static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc, 14395 struct intel_atomic_state *state) 14396 { 14397 struct drm_connector *uninitialized_var(conn); 14398 struct drm_connector_state *conn_state; 14399 struct intel_dp *intel_dp; 14400 int i; 14401 14402 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 14403 if (conn_state->crtc == &crtc->base) 14404 break; 14405 } 14406 intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base); 14407 intel_dp_stop_link_train(intel_dp); 14408 } 14409 14410 static void intel_post_crtc_enable_updates(struct intel_crtc *crtc, 14411 struct intel_atomic_state *state) 14412 { 14413 struct intel_crtc_state *new_crtc_state = 14414 intel_atomic_get_new_crtc_state(state, crtc); 14415 struct intel_crtc_state *old_crtc_state = 14416 intel_atomic_get_old_crtc_state(state, crtc); 14417 struct intel_plane_state *new_plane_state = 14418 intel_atomic_get_new_plane_state(state, 14419 to_intel_plane(crtc->base.primary)); 14420 bool modeset = needs_modeset(new_crtc_state); 14421 14422 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) 14423 intel_fbc_disable(crtc); 14424 else if (new_plane_state) 14425 intel_fbc_enable(crtc, new_crtc_state, new_plane_state); 14426 14427 /* Perform vblank evasion around commit operation */ 14428 intel_pipe_update_start(new_crtc_state); 14429 commit_pipe_config(state, old_crtc_state, new_crtc_state); 14430 skl_update_planes_on_crtc(state, crtc); 14431 intel_pipe_update_end(new_crtc_state); 14432 14433 /* 14434 * We usually enable FIFO underrun interrupts as part of the 14435 * CRTC enable sequence during modesets.
But when we inherit a 14436 * valid pipe configuration from the BIOS we need to take care 14437 * of enabling them on the CRTC's first fastset. 14438 */ 14439 if (new_crtc_state->update_pipe && !modeset && 14440 old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED) 14441 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 14442 } 14443 14444 static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc, 14445 struct intel_atomic_state *state, 14446 struct intel_crtc_state *old_crtc_state, 14447 struct intel_crtc_state *new_crtc_state) 14448 { 14449 struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state); 14450 struct intel_crtc_state *new_slave_crtc_state = 14451 intel_atomic_get_new_crtc_state(state, slave_crtc); 14452 struct intel_crtc_state *old_slave_crtc_state = 14453 intel_atomic_get_old_crtc_state(state, slave_crtc); 14454 14455 WARN_ON(!slave_crtc || !new_slave_crtc_state || 14456 !old_slave_crtc_state); 14457 14458 DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n", 14459 crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id, 14460 slave_crtc->base.name); 14461 14462 /* Enable seq for slave with DP_TP_CTL left Idle until the 14463 * master is ready 14464 */ 14465 intel_crtc_enable_trans_port_sync(slave_crtc, 14466 state, 14467 new_slave_crtc_state); 14468 14469 /* Enable seq for master with DP_TP_CTL left Idle */ 14470 intel_crtc_enable_trans_port_sync(crtc, 14471 state, 14472 new_crtc_state); 14473 14474 /* Set Slave's DP_TP_CTL to Normal */ 14475 intel_set_dp_tp_ctl_normal(slave_crtc, 14476 state); 14477 14478 /* Set Master's DP_TP_CTL To Normal */ 14479 usleep_range(200, 400); 14480 intel_set_dp_tp_ctl_normal(crtc, 14481 state); 14482 14483 /* Now do the post-crtc-enable updates for both master and slave */ 14484 intel_post_crtc_enable_updates(slave_crtc, 14485 state); 14486 intel_post_crtc_enable_updates(crtc, 14487 state); 14488 } 14489 14490 static void skl_commit_modeset_enables(struct intel_atomic_state *state) 14491 { 14492 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14493 struct intel_crtc *crtc; 14494 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 14495 unsigned int updated = 0; 14496 bool progress; 14497 int i; 14498 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 14499 u8 required_slices = state->wm_results.ddb.enabled_slices; 14500 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 14501 14502 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) 14503 /* ignore allocations for crtcs that have been turned off. */ 14504 if (new_crtc_state->base.active) 14505 entries[i] = old_crtc_state->wm.skl.ddb; 14506 14507 /* If a second DBuf slice is required, enable it here */ 14508 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) 14509 icl_dbuf_slices_update(dev_priv, required_slices); 14510 14511 /* 14512 * Whenever the number of active pipes changes, we need to make sure we 14513 * update the pipes in the right order so that their ddb allocations 14514 * never overlap with each other in between CRTC updates. Otherwise we'll 14515 * cause pipe underruns and other bad stuff.
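 *
 * The retry loop below therefore commits a pipe only once its new
 * ddb allocation no longer overlaps any allocation still live in
 * the hardware (tracked in entries[]), waiting for a vblank where
 * required, and keeps iterating until every active pipe has been
 * flushed through.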
14516 */ 14517 do { 14518 progress = false; 14519 14520 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 14521 enum pipe pipe = crtc->pipe; 14522 bool vbl_wait = false; 14523 bool modeset = needs_modeset(new_crtc_state); 14524 14525 if (updated & BIT(crtc->pipe) || !new_crtc_state->base.active) 14526 continue; 14527 14528 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 14529 entries, 14530 INTEL_NUM_PIPES(dev_priv), i)) 14531 continue; 14532 14533 updated |= BIT(pipe); 14534 entries[i] = new_crtc_state->wm.skl.ddb; 14535 14536 /* 14537 * If this is an already active pipe, its ddb allocation changed, 14538 * and this isn't the last pipe that needs updating, 14539 * then we need to wait for a vblank to pass for the 14540 * new ddb allocation to take effect. 14541 */ 14542 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, 14543 &old_crtc_state->wm.skl.ddb) && 14544 !modeset && 14545 state->wm_results.dirty_pipes != updated) 14546 vbl_wait = true; 14547 14548 if (modeset && is_trans_port_sync_mode(new_crtc_state)) { 14549 if (is_trans_port_sync_master(new_crtc_state)) 14550 intel_update_trans_port_sync_crtcs(crtc, 14551 state, 14552 old_crtc_state, 14553 new_crtc_state); 14554 else 14555 continue; 14556 } else { 14557 intel_update_crtc(crtc, state, old_crtc_state, 14558 new_crtc_state); 14559 } 14560 14561 if (vbl_wait) 14562 intel_wait_for_vblank(dev_priv, pipe); 14563 14564 progress = true; 14565 } 14566 } while (progress); 14567 14568 /* If the second DBuf slice is no longer required, disable it */ 14569 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices) 14570 icl_dbuf_slices_update(dev_priv, required_slices); 14571 } 14572 14573 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) 14574 { 14575 struct intel_atomic_state *state, *next; 14576 struct llist_node *freed; 14577 14578 freed = llist_del_all(&dev_priv->atomic_helper.free_list); 14579 llist_for_each_entry_safe(state, next, freed, freed) 14580 drm_atomic_state_put(&state->base); 14581 } 14582 14583 static void intel_atomic_helper_free_state_worker(struct work_struct *work) 14584 { 14585 struct drm_i915_private *dev_priv = 14586 container_of(work, typeof(*dev_priv), atomic_helper.free_work); 14587 14588 intel_atomic_helper_free_state(dev_priv); 14589 } 14590 14591 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 14592 { 14593 struct wait_queue_entry wait_fence, wait_reset; 14594 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); 14595 14596 init_wait_entry(&wait_fence, 0); 14597 init_wait_entry(&wait_reset, 0); 14598 for (;;) { 14599 prepare_to_wait(&intel_state->commit_ready.wait, 14600 &wait_fence, TASK_UNINTERRUPTIBLE); 14601 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags, 14602 I915_RESET_MODESET), 14603 &wait_reset, TASK_UNINTERRUPTIBLE); 14604 14605 14606 if (i915_sw_fence_done(&intel_state->commit_ready) || 14607 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) 14608 break; 14609 14610 schedule(); 14611 } 14612 finish_wait(&intel_state->commit_ready.wait, &wait_fence); 14613 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags, 14614 I915_RESET_MODESET), 14615 &wait_reset); 14616 } 14617 14618 static void intel_atomic_cleanup_work(struct work_struct *work) 14619 { 14620 struct drm_atomic_state *state = 14621 container_of(work, struct drm_atomic_state, commit_work); 14622 struct drm_i915_private *i915 = to_i915(state->dev); 14623 14624 drm_atomic_helper_cleanup_planes(&i915->drm, state);
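/*
 * Signal cleanup_done to the drm core, drop this commit's state
 * reference, and reap any states parked on the free list.
 */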
14625 drm_atomic_helper_commit_cleanup_done(state); 14626 drm_atomic_state_put(state); 14627 14628 intel_atomic_helper_free_state(i915); 14629 } 14630 14631 static void intel_atomic_commit_tail(struct intel_atomic_state *state) 14632 { 14633 struct drm_device *dev = state->base.dev; 14634 struct drm_i915_private *dev_priv = to_i915(dev); 14635 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 14636 struct intel_crtc *crtc; 14637 u64 put_domains[I915_MAX_PIPES] = {}; 14638 intel_wakeref_t wakeref = 0; 14639 int i; 14640 14641 intel_atomic_commit_fence_wait(state); 14642 14643 drm_atomic_helper_wait_for_dependencies(&state->base); 14644 14645 if (state->modeset) 14646 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); 14647 14648 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14649 new_crtc_state, i) { 14650 if (needs_modeset(new_crtc_state) || 14651 new_crtc_state->update_pipe) { 14652 14653 put_domains[crtc->pipe] = 14654 modeset_get_crtc_power_domains(new_crtc_state); 14655 } 14656 } 14657 14658 intel_commit_modeset_disables(state); 14659 14660 /* FIXME: Eventually get rid of our crtc->config pointer */ 14661 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 14662 crtc->config = new_crtc_state; 14663 14664 if (state->modeset) { 14665 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); 14666 14667 intel_set_cdclk_pre_plane_update(dev_priv, 14668 &state->cdclk.actual, 14669 &dev_priv->cdclk.actual, 14670 state->cdclk.pipe); 14671 14672 /* 14673 * SKL workaround: bspec recommends we disable the SAGV when we 14674 * have more than one pipe enabled 14675 */ 14676 if (!intel_can_enable_sagv(state)) 14677 intel_disable_sagv(dev_priv); 14678 14679 intel_modeset_verify_disabled(dev_priv, state); 14680 } 14681 14682 /* Complete the events for pipes that have now been disabled */ 14683 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14684 bool modeset = needs_modeset(new_crtc_state); 14685 14686 14687 if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) { 14688 spin_lock_irq(&dev->event_lock); 14689 drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event); 14690 spin_unlock_irq(&dev->event_lock); 14691 14692 new_crtc_state->base.event = NULL; 14693 } 14694 } 14695 14696 if (state->modeset) 14697 intel_encoders_update_prepare(state); 14698 14699 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 14700 dev_priv->display.commit_modeset_enables(state); 14701 14702 if (state->modeset) { 14703 intel_encoders_update_complete(state); 14704 14705 intel_set_cdclk_post_plane_update(dev_priv, 14706 &state->cdclk.actual, 14707 &dev_priv->cdclk.actual, 14708 state->cdclk.pipe); 14709 } 14710 14711 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here 14712 * already, but still need the state for the delayed optimization. To 14713 * fix this: 14714 * - wrap the optimization/post_plane_update stuff into a per-crtc work. 14715 * - schedule that vblank worker _before_ calling hw_done 14716 * - at the start of commit_tail, cancel it _synchronously 14717 * - switch over to the vblank wait helper in the core after that since 14718 * we don't need our special handling any more.
14719 */ 14720 drm_atomic_helper_wait_for_flip_done(dev, &state->base); 14721 14722 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14723 if (new_crtc_state->base.active && 14724 !needs_modeset(new_crtc_state) && 14725 !new_crtc_state->preload_luts && 14726 (new_crtc_state->base.color_mgmt_changed || 14727 new_crtc_state->update_pipe)) 14728 intel_color_load_luts(new_crtc_state); 14729 } 14730 14731 /* 14732 * Now that the vblank has passed, we can go ahead and program the 14733 * optimal watermarks on platforms that need two-step watermark 14734 * programming. 14735 * 14736 * TODO: Move this (and other cleanup) to an async worker eventually. 14737 */ 14738 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14739 if (dev_priv->display.optimize_watermarks) 14740 dev_priv->display.optimize_watermarks(state, 14741 new_crtc_state); 14742 } 14743 14744 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 14745 intel_post_plane_update(old_crtc_state); 14746 14747 if (put_domains[i]) 14748 modeset_put_power_domains(dev_priv, put_domains[i]); 14749 14750 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); 14751 } 14752 14753 if (state->modeset) 14754 intel_verify_planes(state); 14755 14756 if (state->modeset && intel_can_enable_sagv(state)) 14757 intel_enable_sagv(dev_priv); 14758 14759 drm_atomic_helper_commit_hw_done(&state->base); 14760 14761 if (state->modeset) { 14762 /* As one of the primary mmio accessors, KMS has a high 14763 * likelihood of triggering bugs in unclaimed access. After we 14764 * finish modesetting, see if an error has been flagged, and if 14765 * so enable debugging for the next modeset - and hope we catch 14766 * the culprit. 14767 */ 14768 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); 14769 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); 14770 } 14771 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 14772 14773 /* 14774 * Defer the cleanup of the old state to a separate worker so as not to 14775 * impede the current task (userspace for blocking modesets) that is 14776 * executed inline. For out-of-line asynchronous modesets/flips, 14777 * deferring to a new worker seems overkill, but we would place a 14778 * schedule point (cond_resched()) here anyway to keep latencies 14779 * down.
14780 */ 14781 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); 14782 queue_work(system_highpri_wq, &state->base.commit_work); 14783 } 14784 14785 static void intel_atomic_commit_work(struct work_struct *work) 14786 { 14787 struct intel_atomic_state *state = 14788 container_of(work, struct intel_atomic_state, base.commit_work); 14789 14790 intel_atomic_commit_tail(state); 14791 } 14792 14793 static int __i915_sw_fence_call 14794 intel_atomic_commit_ready(struct i915_sw_fence *fence, 14795 enum i915_sw_fence_notify notify) 14796 { 14797 struct intel_atomic_state *state = 14798 container_of(fence, struct intel_atomic_state, commit_ready); 14799 14800 switch (notify) { 14801 case FENCE_COMPLETE: 14802 /* we do blocking waits in the worker, nothing to do here */ 14803 break; 14804 case FENCE_FREE: 14805 { 14806 struct intel_atomic_helper *helper = 14807 &to_i915(state->base.dev)->atomic_helper; 14808 14809 if (llist_add(&state->freed, &helper->free_list)) 14810 schedule_work(&helper->free_work); 14811 break; 14812 } 14813 } 14814 14815 return NOTIFY_DONE; 14816 } 14817 14818 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 14819 { 14820 struct intel_plane_state *old_plane_state, *new_plane_state; 14821 struct intel_plane *plane; 14822 int i; 14823 14824 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 14825 new_plane_state, i) 14826 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb), 14827 to_intel_frontbuffer(new_plane_state->base.fb), 14828 plane->frontbuffer_bit); 14829 } 14830 14831 static void assert_global_state_locked(struct drm_i915_private *dev_priv) 14832 { 14833 struct intel_crtc *crtc; 14834 14835 for_each_intel_crtc(&dev_priv->drm, crtc) 14836 drm_modeset_lock_assert_held(&crtc->base.mutex); 14837 } 14838 14839 static int intel_atomic_commit(struct drm_device *dev, 14840 struct drm_atomic_state *_state, 14841 bool nonblock) 14842 { 14843 struct intel_atomic_state *state = to_intel_atomic_state(_state); 14844 struct drm_i915_private *dev_priv = to_i915(dev); 14845 int ret = 0; 14846 14847 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 14848 14849 drm_atomic_state_get(&state->base); 14850 i915_sw_fence_init(&state->commit_ready, 14851 intel_atomic_commit_ready); 14852 14853 /* 14854 * The intel_legacy_cursor_update() fast path takes care 14855 * of avoiding the vblank waits for simple cursor 14856 * movement and flips. For cursor on/off and size changes, 14857 * we want to perform the vblank waits so that watermark 14858 * updates happen during the correct frames. Gen9+ have 14859 * double buffered watermarks and so shouldn't need this. 14860 * 14861 * Unset state->legacy_cursor_update before the call to 14862 * drm_atomic_helper_setup_commit() because otherwise 14863 * drm_atomic_helper_wait_for_flip_done() is a noop and 14864 * we get FIFO underruns because we didn't wait 14865 * for vblank. 14866 * 14867 * FIXME doing watermarks and fb cleanup from a vblank worker 14868 * (assuming we had any) would solve these problems. 
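 *
 * Hence the loop below forces the vblank wait (by clearing
 * legacy_cursor_update) whenever a pre-gen9 watermark update is
 * still pending on any CRTC in this commit.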
14869 */ 14870 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) { 14871 struct intel_crtc_state *new_crtc_state; 14872 struct intel_crtc *crtc; 14873 int i; 14874 14875 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 14876 if (new_crtc_state->wm.need_postvbl_update || 14877 new_crtc_state->update_wm_post) 14878 state->base.legacy_cursor_update = false; 14879 } 14880 14881 ret = intel_atomic_prepare_commit(state); 14882 if (ret) { 14883 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); 14884 i915_sw_fence_commit(&state->commit_ready); 14885 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 14886 return ret; 14887 } 14888 14889 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 14890 if (!ret) 14891 ret = drm_atomic_helper_swap_state(&state->base, true); 14892 14893 if (ret) { 14894 i915_sw_fence_commit(&state->commit_ready); 14895 14896 drm_atomic_helper_cleanup_planes(dev, &state->base); 14897 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 14898 return ret; 14899 } 14900 dev_priv->wm.distrust_bios_wm = false; 14901 intel_shared_dpll_swap_state(state); 14902 intel_atomic_track_fbs(state); 14903 14904 if (state->global_state_changed) { 14905 assert_global_state_locked(dev_priv); 14906 14907 memcpy(dev_priv->min_cdclk, state->min_cdclk, 14908 sizeof(state->min_cdclk)); 14909 memcpy(dev_priv->min_voltage_level, state->min_voltage_level, 14910 sizeof(state->min_voltage_level)); 14911 dev_priv->active_pipes = state->active_pipes; 14912 dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk; 14913 14914 intel_cdclk_swap_state(state); 14915 } 14916 14917 drm_atomic_state_get(&state->base); 14918 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 14919 14920 i915_sw_fence_commit(&state->commit_ready); 14921 if (nonblock && state->modeset) { 14922 queue_work(dev_priv->modeset_wq, &state->base.commit_work); 14923 } else if (nonblock) { 14924 queue_work(dev_priv->flip_wq, &state->base.commit_work); 14925 } else { 14926 if (state->modeset) 14927 flush_workqueue(dev_priv->modeset_wq); 14928 intel_atomic_commit_tail(state); 14929 } 14930 14931 return 0; 14932 } 14933 14934 struct wait_rps_boost { 14935 struct wait_queue_entry wait; 14936 14937 struct drm_crtc *crtc; 14938 struct i915_request *request; 14939 }; 14940 14941 static int do_rps_boost(struct wait_queue_entry *_wait, 14942 unsigned mode, int sync, void *key) 14943 { 14944 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); 14945 struct i915_request *rq = wait->request; 14946 14947 /* 14948 * If we missed the vblank, but the request is already running it 14949 * is reasonable to assume that it will complete before the next 14950 * vblank without our intervention, so leave RPS alone. 
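 *
 * If it has not started yet, boost the GPU clocks so the flip
 * still has a chance to make the upcoming vblank.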
14951 */ 14952 if (!i915_request_started(rq)) 14953 intel_rps_boost(rq); 14954 i915_request_put(rq); 14955 14956 drm_crtc_vblank_put(wait->crtc); 14957 14958 list_del(&wait->wait.entry); 14959 kfree(wait); 14960 return 1; 14961 } 14962 14963 static void add_rps_boost_after_vblank(struct drm_crtc *crtc, 14964 struct dma_fence *fence) 14965 { 14966 struct wait_rps_boost *wait; 14967 14968 if (!dma_fence_is_i915(fence)) 14969 return; 14970 14971 if (INTEL_GEN(to_i915(crtc->dev)) < 6) 14972 return; 14973 14974 if (drm_crtc_vblank_get(crtc)) 14975 return; 14976 14977 wait = kmalloc(sizeof(*wait), GFP_KERNEL); 14978 if (!wait) { 14979 drm_crtc_vblank_put(crtc); 14980 return; 14981 } 14982 14983 wait->request = to_request(dma_fence_get(fence)); 14984 wait->crtc = crtc; 14985 14986 wait->wait.func = do_rps_boost; 14987 wait->wait.flags = 0; 14988 14989 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); 14990 } 14991 14992 static int intel_plane_pin_fb(struct intel_plane_state *plane_state) 14993 { 14994 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 14995 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 14996 struct drm_framebuffer *fb = plane_state->base.fb; 14997 struct i915_vma *vma; 14998 14999 if (plane->id == PLANE_CURSOR && 15000 INTEL_INFO(dev_priv)->display.cursor_needs_physical) { 15001 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 15002 const int align = intel_cursor_alignment(dev_priv); 15003 int err; 15004 15005 err = i915_gem_object_attach_phys(obj, align); 15006 if (err) 15007 return err; 15008 } 15009 15010 vma = intel_pin_and_fence_fb_obj(fb, 15011 &plane_state->view, 15012 intel_plane_uses_fence(plane_state), 15013 &plane_state->flags); 15014 if (IS_ERR(vma)) 15015 return PTR_ERR(vma); 15016 15017 plane_state->vma = vma; 15018 15019 return 0; 15020 } 15021 15022 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) 15023 { 15024 struct i915_vma *vma; 15025 15026 vma = fetch_and_zero(&old_plane_state->vma); 15027 if (vma) 15028 intel_unpin_fb_vma(vma, old_plane_state->flags); 15029 } 15030 15031 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) 15032 { 15033 struct i915_sched_attr attr = { 15034 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY), 15035 }; 15036 15037 i915_gem_object_wait_priority(obj, 0, &attr); 15038 } 15039 15040 /** 15041 * intel_prepare_plane_fb - Prepare fb for usage on plane 15042 * @plane: drm plane to prepare for 15043 * @_new_plane_state: the plane state being prepared 15044 * 15045 * Prepares a framebuffer for usage on a display plane. Generally this 15046 * involves pinning the underlying object and updating the frontbuffer tracking 15047 * bits. Some older platforms need special physical address handling for 15048 * cursor planes. 15049 * 15050 * Returns 0 on success, negative error code on failure. 
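 *
 * The pinning done here is undone by intel_cleanup_plane_fb().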
15051 */ 15052 int 15053 intel_prepare_plane_fb(struct drm_plane *plane, 15054 struct drm_plane_state *_new_plane_state) 15055 { 15056 struct intel_plane_state *new_plane_state = 15057 to_intel_plane_state(_new_plane_state); 15058 struct intel_atomic_state *intel_state = 15059 to_intel_atomic_state(new_plane_state->base.state); 15060 struct drm_i915_private *dev_priv = to_i915(plane->dev); 15061 struct drm_framebuffer *fb = new_plane_state->base.fb; 15062 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 15063 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); 15064 int ret; 15065 15066 if (old_obj) { 15067 struct intel_crtc_state *crtc_state = 15068 intel_atomic_get_new_crtc_state(intel_state, 15069 to_intel_crtc(plane->state->crtc)); 15070 15071 /* Big Hammer, we also need to ensure that any pending 15072 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 15073 * current scanout is retired before unpinning the old 15074 * framebuffer. Note that we rely on userspace rendering 15075 * into the buffer attached to the pipe they are waiting 15076 * on. If not, userspace generates a GPU hang with IPEHR 15077 * pointing to the MI_WAIT_FOR_EVENT. 15078 * 15079 * This should only fail upon a hung GPU, in which case we 15080 * can safely continue. 15081 */ 15082 if (needs_modeset(crtc_state)) { 15083 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 15084 old_obj->base.resv, NULL, 15085 false, 0, 15086 GFP_KERNEL); 15087 if (ret < 0) 15088 return ret; 15089 } 15090 } 15091 15092 if (new_plane_state->base.fence) { /* explicit fencing */ 15093 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready, 15094 new_plane_state->base.fence, 15095 I915_FENCE_TIMEOUT, 15096 GFP_KERNEL); 15097 if (ret < 0) 15098 return ret; 15099 } 15100 15101 if (!obj) 15102 return 0; 15103 15104 ret = i915_gem_object_pin_pages(obj); 15105 if (ret) 15106 return ret; 15107 15108 ret = intel_plane_pin_fb(new_plane_state); 15109 15110 i915_gem_object_unpin_pages(obj); 15111 if (ret) 15112 return ret; 15113 15114 fb_obj_bump_render_priority(obj); 15115 intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB); 15116 15117 if (!new_plane_state->base.fence) { /* implicit fencing */ 15118 struct dma_fence *fence; 15119 15120 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 15121 obj->base.resv, NULL, 15122 false, I915_FENCE_TIMEOUT, 15123 GFP_KERNEL); 15124 if (ret < 0) 15125 return ret; 15126 15127 fence = dma_resv_get_excl_rcu(obj->base.resv); 15128 if (fence) { 15129 add_rps_boost_after_vblank(new_plane_state->base.crtc, 15130 fence); 15131 dma_fence_put(fence); 15132 } 15133 } else { 15134 add_rps_boost_after_vblank(new_plane_state->base.crtc, 15135 new_plane_state->base.fence); 15136 } 15137 15138 /* 15139 * We declare pageflips to be interactive and so merit a small bias 15140 * towards upclocking to deliver the frame on time. By only changing 15141 * the RPS thresholds to sample more regularly and aim for higher 15142 * clocks we can hopefully deliver low power workloads (like kodi) 15143 * that are not quite steady state without resorting to forcing 15144 * maximum clocks following a vblank miss (see do_rps_boost()).
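 *
 * The interactive flag is dropped again in intel_cleanup_plane_fb()
 * once the flip has been cleaned up.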
15145 */ 15146 if (!intel_state->rps_interactive) { 15147 intel_rps_mark_interactive(&dev_priv->gt.rps, true); 15148 intel_state->rps_interactive = true; 15149 } 15150 15151 return 0; 15152 } 15153 15154 /** 15155 * intel_cleanup_plane_fb - Cleans up an fb after plane use 15156 * @plane: drm plane to clean up for 15157 * @_old_plane_state: the state from the previous modeset 15158 * 15159 * Cleans up a framebuffer that has just been removed from a plane. 15160 */ 15161 void 15162 intel_cleanup_plane_fb(struct drm_plane *plane, 15163 struct drm_plane_state *_old_plane_state) 15164 { 15165 struct intel_plane_state *old_plane_state = 15166 to_intel_plane_state(_old_plane_state); 15167 struct intel_atomic_state *intel_state = 15168 to_intel_atomic_state(old_plane_state->base.state); 15169 struct drm_i915_private *dev_priv = to_i915(plane->dev); 15170 15171 if (intel_state->rps_interactive) { 15172 intel_rps_mark_interactive(&dev_priv->gt.rps, false); 15173 intel_state->rps_interactive = false; 15174 } 15175 15176 /* Should only be called after a successful intel_prepare_plane_fb()! */ 15177 intel_plane_unpin_fb(old_plane_state); 15178 } 15179 15180 /** 15181 * intel_plane_destroy - destroy a plane 15182 * @plane: plane to destroy 15183 * 15184 * Common destruction function for all types of planes (primary, cursor, 15185 * sprite). 15186 */ 15187 void intel_plane_destroy(struct drm_plane *plane) 15188 { 15189 drm_plane_cleanup(plane); 15190 kfree(to_intel_plane(plane)); 15191 } 15192 15193 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, 15194 u32 format, u64 modifier) 15195 { 15196 switch (modifier) { 15197 case DRM_FORMAT_MOD_LINEAR: 15198 case I915_FORMAT_MOD_X_TILED: 15199 break; 15200 default: 15201 return false; 15202 } 15203 15204 switch (format) { 15205 case DRM_FORMAT_C8: 15206 case DRM_FORMAT_RGB565: 15207 case DRM_FORMAT_XRGB1555: 15208 case DRM_FORMAT_XRGB8888: 15209 return modifier == DRM_FORMAT_MOD_LINEAR || 15210 modifier == I915_FORMAT_MOD_X_TILED; 15211 default: 15212 return false; 15213 } 15214 } 15215 15216 static bool i965_plane_format_mod_supported(struct drm_plane *_plane, 15217 u32 format, u64 modifier) 15218 { 15219 switch (modifier) { 15220 case DRM_FORMAT_MOD_LINEAR: 15221 case I915_FORMAT_MOD_X_TILED: 15222 break; 15223 default: 15224 return false; 15225 } 15226 15227 switch (format) { 15228 case DRM_FORMAT_C8: 15229 case DRM_FORMAT_RGB565: 15230 case DRM_FORMAT_XRGB8888: 15231 case DRM_FORMAT_XBGR8888: 15232 case DRM_FORMAT_XRGB2101010: 15233 case DRM_FORMAT_XBGR2101010: 15234 case DRM_FORMAT_XBGR16161616F: 15235 return modifier == DRM_FORMAT_MOD_LINEAR || 15236 modifier == I915_FORMAT_MOD_X_TILED; 15237 default: 15238 return false; 15239 } 15240 } 15241 15242 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, 15243 u32 format, u64 modifier) 15244 { 15245 return modifier == DRM_FORMAT_MOD_LINEAR && 15246 format == DRM_FORMAT_ARGB8888; 15247 } 15248 15249 static const struct drm_plane_funcs i965_plane_funcs = { 15250 .update_plane = drm_atomic_helper_update_plane, 15251 .disable_plane = drm_atomic_helper_disable_plane, 15252 .destroy = intel_plane_destroy, 15253 .atomic_duplicate_state = intel_plane_duplicate_state, 15254 .atomic_destroy_state = intel_plane_destroy_state, 15255 .format_mod_supported = i965_plane_format_mod_supported, 15256 }; 15257 15258 static const struct drm_plane_funcs i8xx_plane_funcs = { 15259 .update_plane = drm_atomic_helper_update_plane, 15260 .disable_plane = drm_atomic_helper_disable_plane, 15261 
.destroy = intel_plane_destroy, 15262 .atomic_duplicate_state = intel_plane_duplicate_state, 15263 .atomic_destroy_state = intel_plane_destroy_state, 15264 .format_mod_supported = i8xx_plane_format_mod_supported, 15265 }; 15266 15267 static int 15268 intel_legacy_cursor_update(struct drm_plane *_plane, 15269 struct drm_crtc *_crtc, 15270 struct drm_framebuffer *fb, 15271 int crtc_x, int crtc_y, 15272 unsigned int crtc_w, unsigned int crtc_h, 15273 u32 src_x, u32 src_y, 15274 u32 src_w, u32 src_h, 15275 struct drm_modeset_acquire_ctx *ctx) 15276 { 15277 struct intel_plane *plane = to_intel_plane(_plane); 15278 struct intel_crtc *crtc = to_intel_crtc(_crtc); 15279 struct intel_plane_state *old_plane_state = 15280 to_intel_plane_state(plane->base.state); 15281 struct intel_plane_state *new_plane_state; 15282 struct intel_crtc_state *crtc_state = 15283 to_intel_crtc_state(crtc->base.state); 15284 struct intel_crtc_state *new_crtc_state; 15285 int ret; 15286 15287 /* 15288 * When crtc is inactive or there is a modeset pending, 15289 * wait for it to complete in the slowpath 15290 */ 15291 if (!crtc_state->base.active || needs_modeset(crtc_state) || 15292 crtc_state->update_pipe) 15293 goto slow; 15294 15295 /* 15296 * Don't do an async update if there is an outstanding commit modifying 15297 * the plane. This prevents our async update's changes from getting 15298 * overridden by a previous synchronous update's state. 15299 */ 15300 if (old_plane_state->base.commit && 15301 !try_wait_for_completion(&old_plane_state->base.commit->hw_done)) 15302 goto slow; 15303 15304 /* 15305 * If any parameters change that may affect watermarks, 15306 * take the slowpath. Only changing fb or position should be 15307 * in the fastpath. 15308 */ 15309 if (old_plane_state->base.crtc != &crtc->base || 15310 old_plane_state->base.src_w != src_w || 15311 old_plane_state->base.src_h != src_h || 15312 old_plane_state->base.crtc_w != crtc_w || 15313 old_plane_state->base.crtc_h != crtc_h || 15314 !old_plane_state->base.fb != !fb) 15315 goto slow; 15316 15317 new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base)); 15318 if (!new_plane_state) 15319 return -ENOMEM; 15320 15321 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base)); 15322 if (!new_crtc_state) { 15323 ret = -ENOMEM; 15324 goto out_free; 15325 } 15326 15327 drm_atomic_set_fb_for_plane(&new_plane_state->base, fb); 15328 15329 new_plane_state->base.src_x = src_x; 15330 new_plane_state->base.src_y = src_y; 15331 new_plane_state->base.src_w = src_w; 15332 new_plane_state->base.src_h = src_h; 15333 new_plane_state->base.crtc_x = crtc_x; 15334 new_plane_state->base.crtc_y = crtc_y; 15335 new_plane_state->base.crtc_w = crtc_w; 15336 new_plane_state->base.crtc_h = crtc_h; 15337 15338 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, 15339 old_plane_state, new_plane_state); 15340 if (ret) 15341 goto out_free; 15342 15343 ret = intel_plane_pin_fb(new_plane_state); 15344 if (ret) 15345 goto out_free; 15346 15347 intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->base.fb), ORIGIN_FLIP); 15348 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb), 15349 to_intel_frontbuffer(new_plane_state->base.fb), 15350 plane->frontbuffer_bit); 15351 15352 /* Swap plane state */ 15353 plane->base.state = &new_plane_state->base; 15354 15355 /* 15356 * We cannot swap crtc_state as it may be in use by an atomic commit or 15357 * page flip that's running simultaneously. 
If we swap crtc_state and 15358 * destroy the old state, we will cause a use-after-free there. 15359 * 15360 * Only update active_planes, which is needed for our internal 15361 * bookkeeping. Either value will do the right thing when updating 15362 * planes atomically. If the cursor was part of the atomic update then 15363 * we would have taken the slowpath. 15364 */ 15365 crtc_state->active_planes = new_crtc_state->active_planes; 15366 15367 if (new_plane_state->base.visible) 15368 intel_update_plane(plane, crtc_state, new_plane_state); 15369 else 15370 intel_disable_plane(plane, crtc_state); 15371 15372 intel_plane_unpin_fb(old_plane_state); 15373 15374 out_free: 15375 if (new_crtc_state) 15376 intel_crtc_destroy_state(&crtc->base, &new_crtc_state->base); 15377 if (ret) 15378 intel_plane_destroy_state(&plane->base, &new_plane_state->base); 15379 else 15380 intel_plane_destroy_state(&plane->base, &old_plane_state->base); 15381 return ret; 15382 15383 slow: 15384 return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb, 15385 crtc_x, crtc_y, crtc_w, crtc_h, 15386 src_x, src_y, src_w, src_h, ctx); 15387 } 15388 15389 static const struct drm_plane_funcs intel_cursor_plane_funcs = { 15390 .update_plane = intel_legacy_cursor_update, 15391 .disable_plane = drm_atomic_helper_disable_plane, 15392 .destroy = intel_plane_destroy, 15393 .atomic_duplicate_state = intel_plane_duplicate_state, 15394 .atomic_destroy_state = intel_plane_destroy_state, 15395 .format_mod_supported = intel_cursor_format_mod_supported, 15396 }; 15397 15398 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, 15399 enum i9xx_plane_id i9xx_plane) 15400 { 15401 if (!HAS_FBC(dev_priv)) 15402 return false; 15403 15404 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 15405 return i9xx_plane == PLANE_A; /* tied to pipe A */ 15406 else if (IS_IVYBRIDGE(dev_priv)) 15407 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B || 15408 i9xx_plane == PLANE_C; 15409 else if (INTEL_GEN(dev_priv) >= 4) 15410 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B; 15411 else 15412 return i9xx_plane == PLANE_A; 15413 } 15414 15415 static struct intel_plane * 15416 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) 15417 { 15418 struct intel_plane *plane; 15419 const struct drm_plane_funcs *plane_funcs; 15420 unsigned int supported_rotations; 15421 unsigned int possible_crtcs; 15422 const u64 *modifiers; 15423 const u32 *formats; 15424 int num_formats; 15425 int ret, zpos; 15426 15427 if (INTEL_GEN(dev_priv) >= 9) 15428 return skl_universal_plane_create(dev_priv, pipe, 15429 PLANE_PRIMARY); 15430 15431 plane = intel_plane_alloc(); 15432 if (IS_ERR(plane)) 15433 return plane; 15434 15435 plane->pipe = pipe; 15436 /* 15437 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS 15438 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
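 *
 * The !pipe trick below gives exactly that swap: pipe A gets
 * plane B and pipe B gets plane A.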
15439 */ 15440 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) 15441 plane->i9xx_plane = (enum i9xx_plane_id) !pipe; 15442 else 15443 plane->i9xx_plane = (enum i9xx_plane_id) pipe; 15444 plane->id = PLANE_PRIMARY; 15445 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); 15446 15447 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); 15448 if (plane->has_fbc) { 15449 struct intel_fbc *fbc = &dev_priv->fbc; 15450 15451 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; 15452 } 15453 15454 if (INTEL_GEN(dev_priv) >= 4) { 15455 /* 15456 * WaFP16GammaEnabling:ivb 15457 * "Workaround : When using the 64-bit format, the plane 15458 * output on each color channel has one quarter amplitude. 15459 * It can be brought up to full amplitude by using pipe 15460 * gamma correction or pipe color space conversion to 15461 * multiply the plane output by four." 15462 * 15463 * There is no dedicated plane gamma for the primary plane, 15464 * and using the pipe gamma/csc could conflict with other 15465 * planes, so we choose not to expose fp16 on IVB primary 15466 * planes. HSW primary planes no longer have this problem. 15467 */ 15468 if (IS_IVYBRIDGE(dev_priv)) { 15469 formats = ivb_primary_formats; 15470 num_formats = ARRAY_SIZE(ivb_primary_formats); 15471 } else { 15472 formats = i965_primary_formats; 15473 num_formats = ARRAY_SIZE(i965_primary_formats); 15474 } 15475 modifiers = i9xx_format_modifiers; 15476 15477 plane->max_stride = i9xx_plane_max_stride; 15478 plane->update_plane = i9xx_update_plane; 15479 plane->disable_plane = i9xx_disable_plane; 15480 plane->get_hw_state = i9xx_plane_get_hw_state; 15481 plane->check_plane = i9xx_plane_check; 15482 15483 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 15484 plane->min_cdclk = hsw_plane_min_cdclk; 15485 else if (IS_IVYBRIDGE(dev_priv)) 15486 plane->min_cdclk = ivb_plane_min_cdclk; 15487 else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv)) 15488 plane->min_cdclk = vlv_plane_min_cdclk; 15489 else 15490 plane->min_cdclk = i9xx_plane_min_cdclk; 15491 15492 plane_funcs = &i965_plane_funcs; 15493 } else { 15494 formats = i8xx_primary_formats; 15495 num_formats = ARRAY_SIZE(i8xx_primary_formats); 15496 modifiers = i9xx_format_modifiers; 15497 15498 plane->max_stride = i9xx_plane_max_stride; 15499 plane->update_plane = i9xx_update_plane; 15500 plane->disable_plane = i9xx_disable_plane; 15501 plane->get_hw_state = i9xx_plane_get_hw_state; 15502 plane->check_plane = i9xx_plane_check; 15503 plane->min_cdclk = i9xx_plane_min_cdclk; 15504 15505 plane_funcs = &i8xx_plane_funcs; 15506 } 15507 15508 possible_crtcs = BIT(pipe); 15509 15510 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 15511 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 15512 possible_crtcs, plane_funcs, 15513 formats, num_formats, modifiers, 15514 DRM_PLANE_TYPE_PRIMARY, 15515 "primary %c", pipe_name(pipe)); 15516 else 15517 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 15518 possible_crtcs, plane_funcs, 15519 formats, num_formats, modifiers, 15520 DRM_PLANE_TYPE_PRIMARY, 15521 "plane %c", 15522 plane_name(plane->i9xx_plane)); 15523 if (ret) 15524 goto fail; 15525 15526 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 15527 supported_rotations = 15528 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 15529 DRM_MODE_REFLECT_X; 15530 } else if (INTEL_GEN(dev_priv) >= 4) { 15531 supported_rotations = 15532 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; 15533 } else { 15534 supported_rotations = DRM_MODE_ROTATE_0; 15535 } 15536 15537 if 
(INTEL_GEN(dev_priv) >= 4) 15538 drm_plane_create_rotation_property(&plane->base, 15539 DRM_MODE_ROTATE_0, 15540 supported_rotations); 15541 15542 zpos = 0; 15543 drm_plane_create_zpos_immutable_property(&plane->base, zpos); 15544 15545 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); 15546 15547 return plane; 15548 15549 fail: 15550 intel_plane_free(plane); 15551 15552 return ERR_PTR(ret); 15553 } 15554 15555 static struct intel_plane * 15556 intel_cursor_plane_create(struct drm_i915_private *dev_priv, 15557 enum pipe pipe) 15558 { 15559 unsigned int possible_crtcs; 15560 struct intel_plane *cursor; 15561 int ret, zpos; 15562 15563 cursor = intel_plane_alloc(); 15564 if (IS_ERR(cursor)) 15565 return cursor; 15566 15567 cursor->pipe = pipe; 15568 cursor->i9xx_plane = (enum i9xx_plane_id) pipe; 15569 cursor->id = PLANE_CURSOR; 15570 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id); 15571 15572 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 15573 cursor->max_stride = i845_cursor_max_stride; 15574 cursor->update_plane = i845_update_cursor; 15575 cursor->disable_plane = i845_disable_cursor; 15576 cursor->get_hw_state = i845_cursor_get_hw_state; 15577 cursor->check_plane = i845_check_cursor; 15578 } else { 15579 cursor->max_stride = i9xx_cursor_max_stride; 15580 cursor->update_plane = i9xx_update_cursor; 15581 cursor->disable_plane = i9xx_disable_cursor; 15582 cursor->get_hw_state = i9xx_cursor_get_hw_state; 15583 cursor->check_plane = i9xx_check_cursor; 15584 } 15585 15586 cursor->cursor.base = ~0; 15587 cursor->cursor.cntl = ~0; 15588 15589 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) 15590 cursor->cursor.size = ~0; 15591 15592 possible_crtcs = BIT(pipe); 15593 15594 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 15595 possible_crtcs, &intel_cursor_plane_funcs, 15596 intel_cursor_formats, 15597 ARRAY_SIZE(intel_cursor_formats), 15598 cursor_format_modifiers, 15599 DRM_PLANE_TYPE_CURSOR, 15600 "cursor %c", pipe_name(pipe)); 15601 if (ret) 15602 goto fail; 15603 15604 if (INTEL_GEN(dev_priv) >= 4) 15605 drm_plane_create_rotation_property(&cursor->base, 15606 DRM_MODE_ROTATE_0, 15607 DRM_MODE_ROTATE_0 | 15608 DRM_MODE_ROTATE_180); 15609 15610 zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; 15611 drm_plane_create_zpos_immutable_property(&cursor->base, zpos); 15612 15613 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 15614 15615 return cursor; 15616 15617 fail: 15618 intel_plane_free(cursor); 15619 15620 return ERR_PTR(ret); 15621 } 15622 15623 static void intel_crtc_init_scalers(struct intel_crtc *crtc, 15624 struct intel_crtc_state *crtc_state) 15625 { 15626 struct intel_crtc_scaler_state *scaler_state = 15627 &crtc_state->scaler_state; 15628 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 15629 int i; 15630 15631 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe]; 15632 if (!crtc->num_scalers) 15633 return; 15634 15635 for (i = 0; i < crtc->num_scalers; i++) { 15636 struct intel_scaler *scaler = &scaler_state->scalers[i]; 15637 15638 scaler->in_use = 0; 15639 scaler->mode = 0; 15640 } 15641 15642 scaler_state->scaler_id = -1; 15643 } 15644 15645 #define INTEL_CRTC_FUNCS \ 15646 .gamma_set = drm_atomic_helper_legacy_gamma_set, \ 15647 .set_config = drm_atomic_helper_set_config, \ 15648 .destroy = intel_crtc_destroy, \ 15649 .page_flip = drm_atomic_helper_page_flip, \ 15650 .atomic_duplicate_state = intel_crtc_duplicate_state, \ 15651 .atomic_destroy_state = 
intel_crtc_destroy_state, \ 15652 .set_crc_source = intel_crtc_set_crc_source, \ 15653 .verify_crc_source = intel_crtc_verify_crc_source, \ 15654 .get_crc_sources = intel_crtc_get_crc_sources 15655 15656 static const struct drm_crtc_funcs bdw_crtc_funcs = { 15657 INTEL_CRTC_FUNCS, 15658 15659 .get_vblank_counter = g4x_get_vblank_counter, 15660 .enable_vblank = bdw_enable_vblank, 15661 .disable_vblank = bdw_disable_vblank, 15662 }; 15663 15664 static const struct drm_crtc_funcs ilk_crtc_funcs = { 15665 INTEL_CRTC_FUNCS, 15666 15667 .get_vblank_counter = g4x_get_vblank_counter, 15668 .enable_vblank = ilk_enable_vblank, 15669 .disable_vblank = ilk_disable_vblank, 15670 }; 15671 15672 static const struct drm_crtc_funcs g4x_crtc_funcs = { 15673 INTEL_CRTC_FUNCS, 15674 15675 .get_vblank_counter = g4x_get_vblank_counter, 15676 .enable_vblank = i965_enable_vblank, 15677 .disable_vblank = i965_disable_vblank, 15678 }; 15679 15680 static const struct drm_crtc_funcs i965_crtc_funcs = { 15681 INTEL_CRTC_FUNCS, 15682 15683 .get_vblank_counter = i915_get_vblank_counter, 15684 .enable_vblank = i965_enable_vblank, 15685 .disable_vblank = i965_disable_vblank, 15686 }; 15687 15688 static const struct drm_crtc_funcs i915gm_crtc_funcs = { 15689 INTEL_CRTC_FUNCS, 15690 15691 .get_vblank_counter = i915_get_vblank_counter, 15692 .enable_vblank = i915gm_enable_vblank, 15693 .disable_vblank = i915gm_disable_vblank, 15694 }; 15695 15696 static const struct drm_crtc_funcs i915_crtc_funcs = { 15697 INTEL_CRTC_FUNCS, 15698 15699 .get_vblank_counter = i915_get_vblank_counter, 15700 .enable_vblank = i8xx_enable_vblank, 15701 .disable_vblank = i8xx_disable_vblank, 15702 }; 15703 15704 static const struct drm_crtc_funcs i8xx_crtc_funcs = { 15705 INTEL_CRTC_FUNCS, 15706 15707 /* no hw vblank counter */ 15708 .enable_vblank = i8xx_enable_vblank, 15709 .disable_vblank = i8xx_disable_vblank, 15710 }; 15711 15712 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) 15713 { 15714 const struct drm_crtc_funcs *funcs; 15715 struct intel_crtc *intel_crtc; 15716 struct intel_crtc_state *crtc_state = NULL; 15717 struct intel_plane *primary = NULL; 15718 struct intel_plane *cursor = NULL; 15719 int sprite, ret; 15720 15721 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); 15722 if (!intel_crtc) 15723 return -ENOMEM; 15724 15725 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); 15726 if (!crtc_state) { 15727 ret = -ENOMEM; 15728 goto fail; 15729 } 15730 __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base); 15731 intel_crtc->config = crtc_state; 15732 15733 primary = intel_primary_plane_create(dev_priv, pipe); 15734 if (IS_ERR(primary)) { 15735 ret = PTR_ERR(primary); 15736 goto fail; 15737 } 15738 intel_crtc->plane_ids_mask |= BIT(primary->id); 15739 15740 for_each_sprite(dev_priv, pipe, sprite) { 15741 struct intel_plane *plane; 15742 15743 plane = intel_sprite_plane_create(dev_priv, pipe, sprite); 15744 if (IS_ERR(plane)) { 15745 ret = PTR_ERR(plane); 15746 goto fail; 15747 } 15748 intel_crtc->plane_ids_mask |= BIT(plane->id); 15749 } 15750 15751 cursor = intel_cursor_plane_create(dev_priv, pipe); 15752 if (IS_ERR(cursor)) { 15753 ret = PTR_ERR(cursor); 15754 goto fail; 15755 } 15756 intel_crtc->plane_ids_mask |= BIT(cursor->id); 15757 15758 if (HAS_GMCH(dev_priv)) { 15759 if (IS_CHERRYVIEW(dev_priv) || 15760 IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv)) 15761 funcs = &g4x_crtc_funcs; 15762 else if (IS_GEN(dev_priv, 4)) 15763 funcs = &i965_crtc_funcs; 15764 else if 
(IS_I945GM(dev_priv) || IS_I915GM(dev_priv)) 15765 funcs = &i915gm_crtc_funcs; 15766 else if (IS_GEN(dev_priv, 3)) 15767 funcs = &i915_crtc_funcs; 15768 else 15769 funcs = &i8xx_crtc_funcs; 15770 } else { 15771 if (INTEL_GEN(dev_priv) >= 8) 15772 funcs = &bdw_crtc_funcs; 15773 else 15774 funcs = &ilk_crtc_funcs; 15775 } 15776 15777 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, 15778 &primary->base, &cursor->base, 15779 funcs, "pipe %c", pipe_name(pipe)); 15780 if (ret) 15781 goto fail; 15782 15783 intel_crtc->pipe = pipe; 15784 15785 /* initialize shared scalers */ 15786 intel_crtc_init_scalers(intel_crtc, crtc_state); 15787 15788 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) || 15789 dev_priv->pipe_to_crtc_mapping[pipe] != NULL); 15790 dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc; 15791 15792 if (INTEL_GEN(dev_priv) < 9) { 15793 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane; 15794 15795 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 15796 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL); 15797 dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc; 15798 } 15799 15800 intel_color_init(intel_crtc); 15801 15802 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 15803 15804 return 0; 15805 15806 fail: 15807 /* 15808 * drm_mode_config_cleanup() will free up any 15809 * crtcs/planes already initialized. 15810 */ 15811 kfree(crtc_state); 15812 kfree(intel_crtc); 15813 15814 return ret; 15815 } 15816 15817 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 15818 struct drm_file *file) 15819 { 15820 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 15821 struct drm_crtc *drmmode_crtc; 15822 struct intel_crtc *crtc; 15823 15824 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); 15825 if (!drmmode_crtc) 15826 return -ENOENT; 15827 15828 crtc = to_intel_crtc(drmmode_crtc); 15829 pipe_from_crtc_id->pipe = crtc->pipe; 15830 15831 return 0; 15832 } 15833 15834 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) 15835 { 15836 struct drm_device *dev = encoder->base.dev; 15837 struct intel_encoder *source_encoder; 15838 u32 possible_clones = 0; 15839 15840 for_each_intel_encoder(dev, source_encoder) { 15841 if (encoders_cloneable(encoder, source_encoder)) 15842 possible_clones |= drm_encoder_mask(&source_encoder->base); 15843 } 15844 15845 return possible_clones; 15846 } 15847 15848 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) 15849 { 15850 struct drm_device *dev = encoder->base.dev; 15851 struct intel_crtc *crtc; 15852 u32 possible_crtcs = 0; 15853 15854 for_each_intel_crtc(dev, crtc) { 15855 if (encoder->pipe_mask & BIT(crtc->pipe)) 15856 possible_crtcs |= drm_crtc_mask(&crtc->base); 15857 } 15858 15859 return possible_crtcs; 15860 } 15861 15862 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) 15863 { 15864 if (!IS_MOBILE(dev_priv)) 15865 return false; 15866 15867 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 15868 return false; 15869 15870 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) 15871 return false; 15872 15873 return true; 15874 } 15875 15876 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) 15877 { 15878 if (INTEL_GEN(dev_priv) >= 9) 15879 return false; 15880 15881 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) 15882 return false; 15883 15884 if (HAS_PCH_LPT_H(dev_priv) && 15885 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 15886 return false; 15887 
15888 /* DDI E can't be used if DDI A requires 4 lanes */ 15889 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 15890 return false; 15891 15892 if (!dev_priv->vbt.int_crt_support) 15893 return false; 15894 15895 return true; 15896 } 15897 15898 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) 15899 { 15900 int pps_num; 15901 int pps_idx; 15902 15903 if (HAS_DDI(dev_priv)) 15904 return; 15905 /* 15906 * This w/a is needed at least on CPT/PPT, but to be sure apply it 15907 * everywhere where registers can be write protected. 15908 */ 15909 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 15910 pps_num = 2; 15911 else 15912 pps_num = 1; 15913 15914 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { 15915 u32 val = I915_READ(PP_CONTROL(pps_idx)); 15916 15917 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; 15918 I915_WRITE(PP_CONTROL(pps_idx), val); 15919 } 15920 } 15921 15922 static void intel_pps_init(struct drm_i915_private *dev_priv) 15923 { 15924 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv)) 15925 dev_priv->pps_mmio_base = PCH_PPS_BASE; 15926 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 15927 dev_priv->pps_mmio_base = VLV_PPS_BASE; 15928 else 15929 dev_priv->pps_mmio_base = PPS_BASE; 15930 15931 intel_pps_unlock_regs_wa(dev_priv); 15932 } 15933 15934 static void intel_setup_outputs(struct drm_i915_private *dev_priv) 15935 { 15936 struct intel_encoder *encoder; 15937 bool dpd_is_edp = false; 15938 15939 intel_pps_init(dev_priv); 15940 15941 if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) 15942 return; 15943 15944 if (INTEL_GEN(dev_priv) >= 12) { 15945 intel_ddi_init(dev_priv, PORT_A); 15946 intel_ddi_init(dev_priv, PORT_B); 15947 intel_ddi_init(dev_priv, PORT_D); 15948 intel_ddi_init(dev_priv, PORT_E); 15949 intel_ddi_init(dev_priv, PORT_F); 15950 intel_ddi_init(dev_priv, PORT_G); 15951 intel_ddi_init(dev_priv, PORT_H); 15952 intel_ddi_init(dev_priv, PORT_I); 15953 icl_dsi_init(dev_priv); 15954 } else if (IS_ELKHARTLAKE(dev_priv)) { 15955 intel_ddi_init(dev_priv, PORT_A); 15956 intel_ddi_init(dev_priv, PORT_B); 15957 intel_ddi_init(dev_priv, PORT_C); 15958 intel_ddi_init(dev_priv, PORT_D); 15959 icl_dsi_init(dev_priv); 15960 } else if (IS_GEN(dev_priv, 11)) { 15961 intel_ddi_init(dev_priv, PORT_A); 15962 intel_ddi_init(dev_priv, PORT_B); 15963 intel_ddi_init(dev_priv, PORT_C); 15964 intel_ddi_init(dev_priv, PORT_D); 15965 intel_ddi_init(dev_priv, PORT_E); 15966 /* 15967 * On some ICL SKUs port F is not present. No strap bits for 15968 * this, so rely on VBT. 15969 * Work around broken VBTs on SKUs known to have no port F. 15970 */ 15971 if (IS_ICL_WITH_PORT_F(dev_priv) && 15972 intel_bios_is_port_present(dev_priv, PORT_F)) 15973 intel_ddi_init(dev_priv, PORT_F); 15974 15975 icl_dsi_init(dev_priv); 15976 } else if (IS_GEN9_LP(dev_priv)) { 15977 /* 15978 * FIXME: Broxton doesn't support port detection via the 15979 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to 15980 * detect the ports. 15981 */ 15982 intel_ddi_init(dev_priv, PORT_A); 15983 intel_ddi_init(dev_priv, PORT_B); 15984 intel_ddi_init(dev_priv, PORT_C); 15985 15986 vlv_dsi_init(dev_priv); 15987 } else if (HAS_DDI(dev_priv)) { 15988 int found; 15989 15990 if (intel_ddi_crt_present(dev_priv)) 15991 intel_crt_init(dev_priv); 15992 15993 /* 15994 * Haswell uses DDI functions to detect digital outputs. 15995 * On SKL pre-D0 the strap isn't connected, so we assume 15996 * it's there. 
15997 */ 15998 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 15999 /* WaIgnoreDDIAStrap: skl */ 16000 if (found || IS_GEN9_BC(dev_priv)) 16001 intel_ddi_init(dev_priv, PORT_A); 16002 16003 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP 16004 * register */ 16005 found = I915_READ(SFUSE_STRAP); 16006 16007 if (found & SFUSE_STRAP_DDIB_DETECTED) 16008 intel_ddi_init(dev_priv, PORT_B); 16009 if (found & SFUSE_STRAP_DDIC_DETECTED) 16010 intel_ddi_init(dev_priv, PORT_C); 16011 if (found & SFUSE_STRAP_DDID_DETECTED) 16012 intel_ddi_init(dev_priv, PORT_D); 16013 if (found & SFUSE_STRAP_DDIF_DETECTED) 16014 intel_ddi_init(dev_priv, PORT_F); 16015 /* 16016 * On SKL we don't have a way to detect DDI-E so we rely on VBT. 16017 */ 16018 if (IS_GEN9_BC(dev_priv) && 16019 intel_bios_is_port_present(dev_priv, PORT_E)) 16020 intel_ddi_init(dev_priv, PORT_E); 16021 16022 } else if (HAS_PCH_SPLIT(dev_priv)) { 16023 int found; 16024 16025 /* 16026 * intel_edp_init_connector() depends on this completing first, 16027 * to prevent the registration of both eDP and LVDS and the 16028 * incorrect sharing of the PPS. 16029 */ 16030 intel_lvds_init(dev_priv); 16031 intel_crt_init(dev_priv); 16032 16033 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); 16034 16035 if (ilk_has_edp_a(dev_priv)) 16036 intel_dp_init(dev_priv, DP_A, PORT_A); 16037 16038 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 16039 /* PCH SDVOB is multiplexed with HDMIB */ 16040 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 16041 if (!found) 16042 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 16043 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 16044 intel_dp_init(dev_priv, PCH_DP_B, PORT_B); 16045 } 16046 16047 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) 16048 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 16049 16050 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) 16051 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 16052 16053 if (I915_READ(PCH_DP_C) & DP_DETECTED) 16054 intel_dp_init(dev_priv, PCH_DP_C, PORT_C); 16055 16056 if (I915_READ(PCH_DP_D) & DP_DETECTED) 16057 intel_dp_init(dev_priv, PCH_DP_D, PORT_D); 16058 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 16059 bool has_edp, has_port; 16060 16061 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support) 16062 intel_crt_init(dev_priv); 16063 16064 /* 16065 * The DP_DETECTED bit is the latched state of the DDC 16066 * SDA pin at boot. However since eDP doesn't require DDC 16067 * (no way to plug in a DP->HDMI dongle) the DDC pins for 16068 * eDP ports may have been muxed to an alternate function. 16069 * Thus we can't rely on the DP_DETECTED bit alone to detect 16070 * eDP ports. Consult the VBT as well as DP_DETECTED to 16071 * detect eDP ports. 16072 * 16073 * Sadly the straps seem to be missing sometimes even for HDMI 16074 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap 16075 * and VBT for the presence of the port. Additionally we can't 16076 * trust the port type the VBT declares as we've seen at least 16077 * HDMI ports that the VBT claims are DP or eDP.
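 *
 * Hence the checks below treat "strap set OR VBT says present" as
 * reason to probe a port, and classify a port as eDP only when the
 * VBT agrees.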
16078 */ 16079 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); 16080 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 16081 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) 16082 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); 16083 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 16084 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 16085 16086 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); 16087 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 16088 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) 16089 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); 16090 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 16091 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 16092 16093 if (IS_CHERRYVIEW(dev_priv)) { 16094 /* 16095 * eDP not supported on port D, 16096 * so no need to worry about it 16097 */ 16098 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 16099 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) 16100 intel_dp_init(dev_priv, CHV_DP_D, PORT_D); 16101 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) 16102 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 16103 } 16104 16105 vlv_dsi_init(dev_priv); 16106 } else if (IS_PINEVIEW(dev_priv)) { 16107 intel_lvds_init(dev_priv); 16108 intel_crt_init(dev_priv); 16109 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) { 16110 bool found = false; 16111 16112 if (IS_MOBILE(dev_priv)) 16113 intel_lvds_init(dev_priv); 16114 16115 intel_crt_init(dev_priv); 16116 16117 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 16118 DRM_DEBUG_KMS("probing SDVOB\n"); 16119 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 16120 if (!found && IS_G4X(dev_priv)) { 16121 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 16122 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 16123 } 16124 16125 if (!found && IS_G4X(dev_priv)) 16126 intel_dp_init(dev_priv, DP_B, PORT_B); 16127 } 16128 16129 /* Before G4X SDVOC doesn't have its own detect register */ 16130 16131 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 16132 DRM_DEBUG_KMS("probing SDVOC\n"); 16133 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 16134 } 16135 16136 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 16137 16138 if (IS_G4X(dev_priv)) { 16139 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 16140 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 16141 } 16142 if (IS_G4X(dev_priv)) 16143 intel_dp_init(dev_priv, DP_C, PORT_C); 16144 } 16145 16146 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) 16147 intel_dp_init(dev_priv, DP_D, PORT_D); 16148 16149 if (SUPPORTS_TV(dev_priv)) 16150 intel_tv_init(dev_priv); 16151 } else if (IS_GEN(dev_priv, 2)) { 16152 if (IS_I85X(dev_priv)) 16153 intel_lvds_init(dev_priv); 16154 16155 intel_crt_init(dev_priv); 16156 intel_dvo_init(dev_priv); 16157 } 16158 16159 intel_psr_init(dev_priv); 16160 16161 for_each_intel_encoder(&dev_priv->drm, encoder) { 16162 encoder->base.possible_crtcs = 16163 intel_encoder_possible_crtcs(encoder); 16164 encoder->base.possible_clones = 16165 intel_encoder_possible_clones(encoder); 16166 } 16167 16168 intel_init_pch_refclk(dev_priv); 16169 16170 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 16171 } 16172 16173 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 16174 { 16175 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 16176 16177 drm_framebuffer_cleanup(fb); 16178 intel_frontbuffer_put(intel_fb->frontbuffer); 16179 16180 kfree(intel_fb); 16181 } 16182 16183 static int intel_user_framebuffer_create_handle(struct 
drm_framebuffer *fb, 16184 struct drm_file *file, 16185 unsigned int *handle) 16186 { 16187 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 16188 16189 if (obj->userptr.mm) { 16190 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); 16191 return -EINVAL; 16192 } 16193 16194 return drm_gem_handle_create(file, &obj->base, handle); 16195 } 16196 16197 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, 16198 struct drm_file *file, 16199 unsigned flags, unsigned color, 16200 struct drm_clip_rect *clips, 16201 unsigned num_clips) 16202 { 16203 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 16204 16205 i915_gem_object_flush_if_display(obj); 16206 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); 16207 16208 return 0; 16209 } 16210 16211 static const struct drm_framebuffer_funcs intel_fb_funcs = { 16212 .destroy = intel_user_framebuffer_destroy, 16213 .create_handle = intel_user_framebuffer_create_handle, 16214 .dirty = intel_user_framebuffer_dirty, 16215 }; 16216 16217 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, 16218 struct drm_i915_gem_object *obj, 16219 struct drm_mode_fb_cmd2 *mode_cmd) 16220 { 16221 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 16222 struct drm_framebuffer *fb = &intel_fb->base; 16223 u32 max_stride; 16224 unsigned int tiling, stride; 16225 int ret = -EINVAL; 16226 int i; 16227 16228 intel_fb->frontbuffer = intel_frontbuffer_get(obj); 16229 if (!intel_fb->frontbuffer) 16230 return -ENOMEM; 16231 16232 i915_gem_object_lock(obj); 16233 tiling = i915_gem_object_get_tiling(obj); 16234 stride = i915_gem_object_get_stride(obj); 16235 i915_gem_object_unlock(obj); 16236 16237 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { 16238 /* 16239 * If there's a fence, enforce that 16240 * the fb modifier and tiling mode match. 16241 */ 16242 if (tiling != I915_TILING_NONE && 16243 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 16244 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n"); 16245 goto err; 16246 } 16247 } else { 16248 if (tiling == I915_TILING_X) { 16249 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; 16250 } else if (tiling == I915_TILING_Y) { 16251 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n"); 16252 goto err; 16253 } 16254 } 16255 16256 if (!drm_any_plane_has_format(&dev_priv->drm, 16257 mode_cmd->pixel_format, 16258 mode_cmd->modifier[0])) { 16259 struct drm_format_name_buf format_name; 16260 16261 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n", 16262 drm_get_format_name(mode_cmd->pixel_format, 16263 &format_name), 16264 mode_cmd->modifier[0]); 16265 goto err; 16266 } 16267 16268 /* 16269 * gen2/3 display engine uses the fence if present, 16270 * so the tiling mode must match the fb modifier exactly. 16271 */ 16272 if (INTEL_GEN(dev_priv) < 4 && 16273 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 16274 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n"); 16275 goto err; 16276 } 16277 16278 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format, 16279 mode_cmd->modifier[0]); 16280 if (mode_cmd->pitches[0] > max_stride) { 16281 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", 16282 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? 16283 "tiled" : "linear", 16284 mode_cmd->pitches[0], max_stride); 16285 goto err; 16286 } 16287 16288 /* 16289 * If there's a fence, enforce that 16290 * the fb pitch and fence stride match. 
16291 */ 16292 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) { 16293 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n", 16294 mode_cmd->pitches[0], stride); 16295 goto err; 16296 } 16297 16298 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ 16299 if (mode_cmd->offsets[0] != 0) 16300 goto err; 16301 16302 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd); 16303 16304 for (i = 0; i < fb->format->num_planes; i++) { 16305 u32 stride_alignment; 16306 16307 if (mode_cmd->handles[i] != mode_cmd->handles[0]) { 16308 DRM_DEBUG_KMS("bad plane %d handle\n", i); 16309 goto err; 16310 } 16311 16312 stride_alignment = intel_fb_stride_alignment(fb, i); 16313 16314 /* 16315 * Display WA #0531: skl,bxt,kbl,glk 16316 * 16317 * Render decompression and plane width > 3840 16318 * combined with horizontal panning requires the 16319 * plane stride to be a multiple of 4. We'll just 16320 * require the entire fb to accommodate that to avoid 16321 * potential runtime errors at plane configuration time. 16322 */ 16323 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 && 16324 is_ccs_modifier(fb->modifier)) 16325 stride_alignment *= 4; 16326 16327 if (fb->pitches[i] & (stride_alignment - 1)) { 16328 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n", 16329 i, fb->pitches[i], stride_alignment); 16330 goto err; 16331 } 16332 16333 fb->obj[i] = &obj->base; 16334 } 16335 16336 ret = intel_fill_fb_info(dev_priv, fb); 16337 if (ret) 16338 goto err; 16339 16340 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); 16341 if (ret) { 16342 DRM_ERROR("framebuffer init failed %d\n", ret); 16343 goto err; 16344 } 16345 16346 return 0; 16347 16348 err: 16349 intel_frontbuffer_put(intel_fb->frontbuffer); 16350 return ret; 16351 } 16352 16353 static struct drm_framebuffer * 16354 intel_user_framebuffer_create(struct drm_device *dev, 16355 struct drm_file *filp, 16356 const struct drm_mode_fb_cmd2 *user_mode_cmd) 16357 { 16358 struct drm_framebuffer *fb; 16359 struct drm_i915_gem_object *obj; 16360 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; 16361 16362 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); 16363 if (!obj) 16364 return ERR_PTR(-ENOENT); 16365 16366 fb = intel_framebuffer_create(obj, &mode_cmd); 16367 i915_gem_object_put(obj); 16368 16369 return fb; 16370 } 16371 16372 static void intel_atomic_state_free(struct drm_atomic_state *state) 16373 { 16374 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 16375 16376 drm_atomic_state_default_release(state); 16377 16378 i915_sw_fence_fini(&intel_state->commit_ready); 16379 16380 kfree(state); 16381 } 16382 16383 static enum drm_mode_status 16384 intel_mode_valid(struct drm_device *dev, 16385 const struct drm_display_mode *mode) 16386 { 16387 struct drm_i915_private *dev_priv = to_i915(dev); 16388 int hdisplay_max, htotal_max; 16389 int vdisplay_max, vtotal_max; 16390 16391 /* 16392 * Can't reject DBLSCAN here because Xorg ddxen can add piles 16393 * of DBLSCAN modes to the output's mode list when they detect 16394 * the scaling mode property on the connector. And they don't 16395 * ask the kernel to validate those modes in any way until 16396 * modeset time at which point the client gets a protocol error. 16397 * So in order to not upset those clients we silently ignore the 16398 * DBLSCAN flag on such connectors. For other connectors we will 16399 * reject modes with the DBLSCAN flag in encoder->compute_config(). 
16400 * And we always reject DBLSCAN modes in connector->mode_valid() 16401 * as we never want such modes on the connector's mode list. 16402 */ 16403 16404 if (mode->vscan > 1) 16405 return MODE_NO_VSCAN; 16406 16407 if (mode->flags & DRM_MODE_FLAG_HSKEW) 16408 return MODE_H_ILLEGAL; 16409 16410 if (mode->flags & (DRM_MODE_FLAG_CSYNC | 16411 DRM_MODE_FLAG_NCSYNC | 16412 DRM_MODE_FLAG_PCSYNC)) 16413 return MODE_HSYNC; 16414 16415 if (mode->flags & (DRM_MODE_FLAG_BCAST | 16416 DRM_MODE_FLAG_PIXMUX | 16417 DRM_MODE_FLAG_CLKDIV2)) 16418 return MODE_BAD; 16419 16420 /* Transcoder timing limits */ 16421 if (INTEL_GEN(dev_priv) >= 11) { 16422 hdisplay_max = 16384; 16423 vdisplay_max = 8192; 16424 htotal_max = 16384; 16425 vtotal_max = 8192; 16426 } else if (INTEL_GEN(dev_priv) >= 9 || 16427 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 16428 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ 16429 vdisplay_max = 4096; 16430 htotal_max = 8192; 16431 vtotal_max = 8192; 16432 } else if (INTEL_GEN(dev_priv) >= 3) { 16433 hdisplay_max = 4096; 16434 vdisplay_max = 4096; 16435 htotal_max = 8192; 16436 vtotal_max = 8192; 16437 } else { 16438 hdisplay_max = 2048; 16439 vdisplay_max = 2048; 16440 htotal_max = 4096; 16441 vtotal_max = 4096; 16442 } 16443 16444 if (mode->hdisplay > hdisplay_max || 16445 mode->hsync_start > htotal_max || 16446 mode->hsync_end > htotal_max || 16447 mode->htotal > htotal_max) 16448 return MODE_H_ILLEGAL; 16449 16450 if (mode->vdisplay > vdisplay_max || 16451 mode->vsync_start > vtotal_max || 16452 mode->vsync_end > vtotal_max || 16453 mode->vtotal > vtotal_max) 16454 return MODE_V_ILLEGAL; 16455 16456 if (INTEL_GEN(dev_priv) >= 5) { 16457 if (mode->hdisplay < 64 || 16458 mode->htotal - mode->hdisplay < 32) 16459 return MODE_H_ILLEGAL; 16460 16461 if (mode->vtotal - mode->vdisplay < 5) 16462 return MODE_V_ILLEGAL; 16463 } else { 16464 if (mode->htotal - mode->hdisplay < 32) 16465 return MODE_H_ILLEGAL; 16466 16467 if (mode->vtotal - mode->vdisplay < 3) 16468 return MODE_V_ILLEGAL; 16469 } 16470 16471 return MODE_OK; 16472 } 16473 16474 enum drm_mode_status 16475 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, 16476 const struct drm_display_mode *mode) 16477 { 16478 int plane_width_max, plane_height_max; 16479 16480 /* 16481 * intel_mode_valid() should be 16482 * sufficient on older platforms. 16483 */ 16484 if (INTEL_GEN(dev_priv) < 9) 16485 return MODE_OK; 16486 16487 /* 16488 * Most people will probably want a fullscreen 16489 * plane so let's not advertize modes that are 16490 * too big for that. 
16491 */ 16492 if (INTEL_GEN(dev_priv) >= 11) { 16493 plane_width_max = 5120; 16494 plane_height_max = 4320; 16495 } else { 16496 plane_width_max = 5120; 16497 plane_height_max = 4096; 16498 } 16499 16500 if (mode->hdisplay > plane_width_max) 16501 return MODE_H_ILLEGAL; 16502 16503 if (mode->vdisplay > plane_height_max) 16504 return MODE_V_ILLEGAL; 16505 16506 return MODE_OK; 16507 } 16508 16509 static const struct drm_mode_config_funcs intel_mode_funcs = { 16510 .fb_create = intel_user_framebuffer_create, 16511 .get_format_info = intel_get_format_info, 16512 .output_poll_changed = intel_fbdev_output_poll_changed, 16513 .mode_valid = intel_mode_valid, 16514 .atomic_check = intel_atomic_check, 16515 .atomic_commit = intel_atomic_commit, 16516 .atomic_state_alloc = intel_atomic_state_alloc, 16517 .atomic_state_clear = intel_atomic_state_clear, 16518 .atomic_state_free = intel_atomic_state_free, 16519 }; 16520 16521 /** 16522 * intel_init_display_hooks - initialize the display modesetting hooks 16523 * @dev_priv: device private 16524 */ 16525 void intel_init_display_hooks(struct drm_i915_private *dev_priv) 16526 { 16527 intel_init_cdclk_hooks(dev_priv); 16528 16529 if (INTEL_GEN(dev_priv) >= 9) { 16530 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 16531 dev_priv->display.get_initial_plane_config = 16532 skylake_get_initial_plane_config; 16533 dev_priv->display.crtc_compute_clock = 16534 haswell_crtc_compute_clock; 16535 dev_priv->display.crtc_enable = haswell_crtc_enable; 16536 dev_priv->display.crtc_disable = haswell_crtc_disable; 16537 } else if (HAS_DDI(dev_priv)) { 16538 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 16539 dev_priv->display.get_initial_plane_config = 16540 i9xx_get_initial_plane_config; 16541 dev_priv->display.crtc_compute_clock = 16542 haswell_crtc_compute_clock; 16543 dev_priv->display.crtc_enable = haswell_crtc_enable; 16544 dev_priv->display.crtc_disable = haswell_crtc_disable; 16545 } else if (HAS_PCH_SPLIT(dev_priv)) { 16546 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 16547 dev_priv->display.get_initial_plane_config = 16548 i9xx_get_initial_plane_config; 16549 dev_priv->display.crtc_compute_clock = 16550 ironlake_crtc_compute_clock; 16551 dev_priv->display.crtc_enable = ironlake_crtc_enable; 16552 dev_priv->display.crtc_disable = ironlake_crtc_disable; 16553 } else if (IS_CHERRYVIEW(dev_priv)) { 16554 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 16555 dev_priv->display.get_initial_plane_config = 16556 i9xx_get_initial_plane_config; 16557 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock; 16558 dev_priv->display.crtc_enable = valleyview_crtc_enable; 16559 dev_priv->display.crtc_disable = i9xx_crtc_disable; 16560 } else if (IS_VALLEYVIEW(dev_priv)) { 16561 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 16562 dev_priv->display.get_initial_plane_config = 16563 i9xx_get_initial_plane_config; 16564 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock; 16565 dev_priv->display.crtc_enable = valleyview_crtc_enable; 16566 dev_priv->display.crtc_disable = i9xx_crtc_disable; 16567 } else if (IS_G4X(dev_priv)) { 16568 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 16569 dev_priv->display.get_initial_plane_config = 16570 i9xx_get_initial_plane_config; 16571 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock; 16572 dev_priv->display.crtc_enable = i9xx_crtc_enable; 16573 dev_priv->display.crtc_disable = i9xx_crtc_disable; 16574 } else if (IS_PINEVIEW(dev_priv)) { 16575 
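		/*
		 * Pineview is gen3-based but uses non-standard DPLL
		 * dividers, hence the dedicated pnv clock hook below.
		 */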
dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 16576 dev_priv->display.get_initial_plane_config = 16577 i9xx_get_initial_plane_config; 16578 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; 16579 dev_priv->display.crtc_enable = i9xx_crtc_enable; 16580 dev_priv->display.crtc_disable = i9xx_crtc_disable; 16581 } else if (!IS_GEN(dev_priv, 2)) { 16582 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 16583 dev_priv->display.get_initial_plane_config = 16584 i9xx_get_initial_plane_config; 16585 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 16586 dev_priv->display.crtc_enable = i9xx_crtc_enable; 16587 dev_priv->display.crtc_disable = i9xx_crtc_disable; 16588 } else { 16589 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 16590 dev_priv->display.get_initial_plane_config = 16591 i9xx_get_initial_plane_config; 16592 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; 16593 dev_priv->display.crtc_enable = i9xx_crtc_enable; 16594 dev_priv->display.crtc_disable = i9xx_crtc_disable; 16595 } 16596 16597 if (IS_GEN(dev_priv, 5)) { 16598 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 16599 } else if (IS_GEN(dev_priv, 6)) { 16600 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 16601 } else if (IS_IVYBRIDGE(dev_priv)) { 16602 /* FIXME: detect B0+ stepping and use auto training */ 16603 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 16604 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 16605 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 16606 } 16607 16608 if (INTEL_GEN(dev_priv) >= 9) 16609 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables; 16610 else 16611 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables; 16612 16613 } 16614 16615 void intel_modeset_init_hw(struct drm_i915_private *i915) 16616 { 16617 intel_update_cdclk(i915); 16618 intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK"); 16619 i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw; 16620 } 16621 16622 /* 16623 * Calculate what we think the watermarks should be for the state we've read 16624 * out of the hardware and then immediately program those watermarks so that 16625 * we ensure the hardware settings match our internal state. 16626 * 16627 * We can calculate what we think WM's should be by creating a duplicate of the 16628 * current state (which was constructed during hardware readout) and running it 16629 * through the atomic check code to calculate new watermark values in the 16630 * state object. 16631 */ 16632 static void sanitize_watermarks(struct drm_device *dev) 16633 { 16634 struct drm_i915_private *dev_priv = to_i915(dev); 16635 struct drm_atomic_state *state; 16636 struct intel_atomic_state *intel_state; 16637 struct intel_crtc *crtc; 16638 struct intel_crtc_state *crtc_state; 16639 struct drm_modeset_acquire_ctx ctx; 16640 int ret; 16641 int i; 16642 16643 /* Only supported on platforms that use atomic watermark design */ 16644 if (!dev_priv->display.optimize_watermarks) 16645 return; 16646 16647 /* 16648 * We need to hold connection_mutex before calling duplicate_state so 16649 * that the connector loop is protected. 
16650 */ 16651 drm_modeset_acquire_init(&ctx, 0); 16652 retry: 16653 ret = drm_modeset_lock_all_ctx(dev, &ctx); 16654 if (ret == -EDEADLK) { 16655 drm_modeset_backoff(&ctx); 16656 goto retry; 16657 } else if (WARN_ON(ret)) { 16658 goto fail; 16659 } 16660 16661 state = drm_atomic_helper_duplicate_state(dev, &ctx); 16662 if (WARN_ON(IS_ERR(state))) 16663 goto fail; 16664 16665 intel_state = to_intel_atomic_state(state); 16666 16667 /* 16668 * Hardware readout is the only time we don't want to calculate 16669 * intermediate watermarks (since we don't trust the current 16670 * watermarks). 16671 */ 16672 if (!HAS_GMCH(dev_priv)) 16673 intel_state->skip_intermediate_wm = true; 16674 16675 ret = intel_atomic_check(dev, state); 16676 if (ret) { 16677 /* 16678 * If we fail here, it means that the hardware appears to be 16679 * programmed in a way that shouldn't be possible, given our 16680 * understanding of watermark requirements. This might mean a 16681 * mistake in the hardware readout code or a mistake in the 16682 * watermark calculations for a given platform. Raise a WARN 16683 * so that this is noticeable. 16684 * 16685 * If this actually happens, we'll have to just leave the 16686 * BIOS-programmed watermarks untouched and hope for the best. 16687 */ 16688 WARN(true, "Could not determine valid watermarks for inherited state\n"); 16689 goto put_state; 16690 } 16691 16692 /* Write calculated watermark values back */ 16693 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { 16694 crtc_state->wm.need_postvbl_update = true; 16695 dev_priv->display.optimize_watermarks(intel_state, crtc_state); 16696 16697 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; 16698 } 16699 16700 put_state: 16701 drm_atomic_state_put(state); 16702 fail: 16703 drm_modeset_drop_locks(&ctx); 16704 drm_modeset_acquire_fini(&ctx); 16705 } 16706 16707 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) 16708 { 16709 if (IS_GEN(dev_priv, 5)) { 16710 u32 fdi_pll_clk = 16711 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; 16712 16713 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; 16714 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) { 16715 dev_priv->fdi_pll_freq = 270000; 16716 } else { 16717 return; 16718 } 16719 16720 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); 16721 } 16722 16723 static int intel_initial_commit(struct drm_device *dev) 16724 { 16725 struct drm_atomic_state *state = NULL; 16726 struct drm_modeset_acquire_ctx ctx; 16727 struct drm_crtc *crtc; 16728 struct drm_crtc_state *crtc_state; 16729 int ret = 0; 16730 16731 state = drm_atomic_state_alloc(dev); 16732 if (!state) 16733 return -ENOMEM; 16734 16735 drm_modeset_acquire_init(&ctx, 0); 16736 16737 retry: 16738 state->acquire_ctx = &ctx; 16739 16740 drm_for_each_crtc(crtc, dev) { 16741 crtc_state = drm_atomic_get_crtc_state(state, crtc); 16742 if (IS_ERR(crtc_state)) { 16743 ret = PTR_ERR(crtc_state); 16744 goto out; 16745 } 16746 16747 if (crtc_state->active) { 16748 ret = drm_atomic_add_affected_planes(state, crtc); 16749 if (ret) 16750 goto out; 16751 16752 /* 16753 * FIXME hack to force a LUT update to avoid the 16754 * plane update forcing the pipe gamma on without 16755 * having a proper LUT loaded. Remove once we 16756 * have readout for pipe gamma enable. 
16757 */ 16758 crtc_state->color_mgmt_changed = true; 16759 } 16760 } 16761 16762 ret = drm_atomic_commit(state); 16763 16764 out: 16765 if (ret == -EDEADLK) { 16766 drm_atomic_state_clear(state); 16767 drm_modeset_backoff(&ctx); 16768 goto retry; 16769 } 16770 16771 drm_atomic_state_put(state); 16772 16773 drm_modeset_drop_locks(&ctx); 16774 drm_modeset_acquire_fini(&ctx); 16775 16776 return ret; 16777 } 16778 16779 static void intel_mode_config_init(struct drm_i915_private *i915) 16780 { 16781 struct drm_mode_config *mode_config = &i915->drm.mode_config; 16782 16783 drm_mode_config_init(&i915->drm); 16784 16785 mode_config->min_width = 0; 16786 mode_config->min_height = 0; 16787 16788 mode_config->preferred_depth = 24; 16789 mode_config->prefer_shadow = 1; 16790 16791 mode_config->allow_fb_modifiers = true; 16792 16793 mode_config->funcs = &intel_mode_funcs; 16794 16795 /* 16796 * Maximum framebuffer dimensions, chosen to match 16797 * the maximum render engine surface size on gen4+. 16798 */ 16799 if (INTEL_GEN(i915) >= 7) { 16800 mode_config->max_width = 16384; 16801 mode_config->max_height = 16384; 16802 } else if (INTEL_GEN(i915) >= 4) { 16803 mode_config->max_width = 8192; 16804 mode_config->max_height = 8192; 16805 } else if (IS_GEN(i915, 3)) { 16806 mode_config->max_width = 4096; 16807 mode_config->max_height = 4096; 16808 } else { 16809 mode_config->max_width = 2048; 16810 mode_config->max_height = 2048; 16811 } 16812 16813 if (IS_I845G(i915) || IS_I865G(i915)) { 16814 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512; 16815 mode_config->cursor_height = 1023; 16816 } else if (IS_GEN(i915, 2)) { 16817 mode_config->cursor_width = 64; 16818 mode_config->cursor_height = 64; 16819 } else { 16820 mode_config->cursor_width = 256; 16821 mode_config->cursor_height = 256; 16822 } 16823 } 16824 16825 int intel_modeset_init(struct drm_i915_private *i915) 16826 { 16827 struct drm_device *dev = &i915->drm; 16828 enum pipe pipe; 16829 struct intel_crtc *crtc; 16830 int ret; 16831 16832 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); 16833 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | 16834 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); 16835 16836 intel_mode_config_init(i915); 16837 16838 ret = intel_bw_init(i915); 16839 if (ret) 16840 return ret; 16841 16842 init_llist_head(&i915->atomic_helper.free_list); 16843 INIT_WORK(&i915->atomic_helper.free_work, 16844 intel_atomic_helper_free_state_worker); 16845 16846 intel_init_quirks(i915); 16847 16848 intel_fbc_init(i915); 16849 16850 intel_init_pm(i915); 16851 16852 intel_panel_sanitize_ssc(i915); 16853 16854 intel_gmbus_setup(i915); 16855 16856 DRM_DEBUG_KMS("%d display pipe%s available.\n", 16857 INTEL_NUM_PIPES(i915), 16858 INTEL_NUM_PIPES(i915) > 1 ? 
"s" : ""); 16859 16860 if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { 16861 for_each_pipe(i915, pipe) { 16862 ret = intel_crtc_init(i915, pipe); 16863 if (ret) { 16864 drm_mode_config_cleanup(dev); 16865 return ret; 16866 } 16867 } 16868 } 16869 16870 intel_shared_dpll_init(dev); 16871 intel_update_fdi_pll_freq(i915); 16872 16873 intel_update_czclk(i915); 16874 intel_modeset_init_hw(i915); 16875 16876 intel_hdcp_component_init(i915); 16877 16878 if (i915->max_cdclk_freq == 0) 16879 intel_update_max_cdclk(i915); 16880 16881 /* Just disable it once at startup */ 16882 intel_vga_disable(i915); 16883 intel_setup_outputs(i915); 16884 16885 drm_modeset_lock_all(dev); 16886 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 16887 drm_modeset_unlock_all(dev); 16888 16889 for_each_intel_crtc(dev, crtc) { 16890 struct intel_initial_plane_config plane_config = {}; 16891 16892 if (!crtc->active) 16893 continue; 16894 16895 /* 16896 * Note that reserving the BIOS fb up front prevents us 16897 * from stuffing other stolen allocations like the ring 16898 * on top. This prevents some ugliness at boot time, and 16899 * can even allow for smooth boot transitions if the BIOS 16900 * fb is large enough for the active pipe configuration. 16901 */ 16902 i915->display.get_initial_plane_config(crtc, &plane_config); 16903 16904 /* 16905 * If the fb is shared between multiple heads, we'll 16906 * just get the first one. 16907 */ 16908 intel_find_initial_plane_obj(crtc, &plane_config); 16909 } 16910 16911 /* 16912 * Make sure hardware watermarks really match the state we read out. 16913 * Note that we need to do this after reconstructing the BIOS fb's 16914 * since the watermark calculation done here will use pstate->fb. 16915 */ 16916 if (!HAS_GMCH(i915)) 16917 sanitize_watermarks(dev); 16918 16919 /* 16920 * Force all active planes to recompute their states. So that on 16921 * mode_setcrtc after probe, all the intel_plane_state variables 16922 * are already calculated and there is no assert_plane warnings 16923 * during bootup. 
16924 */ 16925 ret = intel_initial_commit(dev); 16926 if (ret) 16927 DRM_DEBUG_KMS("Initial commit in probe failed.\n"); 16928 16929 return 0; 16930 } 16931 16932 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 16933 { 16934 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 16935 /* 640x480@60Hz, ~25175 kHz */ 16936 struct dpll clock = { 16937 .m1 = 18, 16938 .m2 = 7, 16939 .p1 = 13, 16940 .p2 = 4, 16941 .n = 2, 16942 }; 16943 u32 dpll, fp; 16944 int i; 16945 16946 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154); 16947 16948 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n", 16949 pipe_name(pipe), clock.vco, clock.dot); 16950 16951 fp = i9xx_dpll_compute_fp(&clock); 16952 dpll = DPLL_DVO_2X_MODE | 16953 DPLL_VGA_MODE_DIS | 16954 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | 16955 PLL_P2_DIVIDE_BY_4 | 16956 PLL_REF_INPUT_DREFCLK | 16957 DPLL_VCO_ENABLE; 16958 16959 I915_WRITE(FP0(pipe), fp); 16960 I915_WRITE(FP1(pipe), fp); 16961 16962 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); 16963 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); 16964 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); 16965 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); 16966 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); 16967 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); 16968 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); 16969 16970 /* 16971 * Apparently we need to have VGA mode enabled prior to changing 16972 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 16973 * dividers, even though the register value does change. 16974 */ 16975 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); 16976 I915_WRITE(DPLL(pipe), dpll); 16977 16978 /* Wait for the clocks to stabilize. */ 16979 POSTING_READ(DPLL(pipe)); 16980 udelay(150); 16981 16982 /* The pixel multiplier can only be updated once the 16983 * DPLL is enabled and the clocks are stable. 16984 * 16985 * So write it again. 
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}

void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		if (!plane->get_hw_state(plane, &pipe))
			continue;

		if (pipe == crtc->pipe)
			continue;

		DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			      plane->base.base.id, plane->base.name);

		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}

static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}

static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}

static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
			       enum pipe pch_transcoder)
{
	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
}

static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}
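	/*
	 * Only the primary plane's framebuffer is inherited from the BIOS
	 * (see the get_initial_plane_config() path in intel_modeset_init()),
	 * so every other plane the BIOS may have left enabled is forced off
	 * below to keep the sw and hw state in agreement.
	 */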
	if (crtc_state->base.active) {
		struct intel_plane *plane;

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
				   SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_transcoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}

static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	/*
	 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
	 * the hardware when a high res display is plugged in. The DPLL P
	 * divider is zero, and the pipe timings are bonkers. We'll
	 * try to disable everything in that case.
	 *
	 * FIXME would be nice to be able to sanitize this state
	 * without several WARNs, but for now let's take the easy
	 * road.
	 */
	return IS_GEN(dev_priv, 6) &&
		crtc_state->base.active &&
		crtc_state->shared_dpll &&
		crtc_state->port_clock == 0;
}

static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
17180 to_intel_crtc_state(crtc->base.state) : NULL; 17181 17182 /* We need to check both for a crtc link (meaning that the 17183 * encoder is active and trying to read from a pipe) and the 17184 * pipe itself being active. */ 17185 bool has_active_crtc = crtc_state && 17186 crtc_state->base.active; 17187 17188 if (crtc_state && has_bogus_dpll_config(crtc_state)) { 17189 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n", 17190 pipe_name(crtc->pipe)); 17191 has_active_crtc = false; 17192 } 17193 17194 connector = intel_encoder_find_connector(encoder); 17195 if (connector && !has_active_crtc) { 17196 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", 17197 encoder->base.base.id, 17198 encoder->base.name); 17199 17200 /* Connector is active, but has no active pipe. This is 17201 * fallout from our resume register restoring. Disable 17202 * the encoder manually again. */ 17203 if (crtc_state) { 17204 struct drm_encoder *best_encoder; 17205 17206 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 17207 encoder->base.base.id, 17208 encoder->base.name); 17209 17210 /* avoid oopsing in case the hooks consult best_encoder */ 17211 best_encoder = connector->base.state->best_encoder; 17212 connector->base.state->best_encoder = &encoder->base; 17213 17214 if (encoder->disable) 17215 encoder->disable(encoder, crtc_state, 17216 connector->base.state); 17217 if (encoder->post_disable) 17218 encoder->post_disable(encoder, crtc_state, 17219 connector->base.state); 17220 17221 connector->base.state->best_encoder = best_encoder; 17222 } 17223 encoder->base.crtc = NULL; 17224 17225 /* Inconsistent output/port/pipe state happens presumably due to 17226 * a bug in one of the get_hw_state functions. Or someplace else 17227 * in our code, like the register restore mess on resume. Clamp 17228 * things to off as a safer default. 
*/ 17229 17230 connector->base.dpms = DRM_MODE_DPMS_OFF; 17231 connector->base.encoder = NULL; 17232 } 17233 17234 /* notify opregion of the sanitized encoder state */ 17235 intel_opregion_notify_encoder(encoder, connector && has_active_crtc); 17236 17237 if (INTEL_GEN(dev_priv) >= 11) 17238 icl_sanitize_encoder_pll_mapping(encoder); 17239 } 17240 17241 /* FIXME read out full plane state for all planes */ 17242 static void readout_plane_state(struct drm_i915_private *dev_priv) 17243 { 17244 struct intel_plane *plane; 17245 struct intel_crtc *crtc; 17246 17247 for_each_intel_plane(&dev_priv->drm, plane) { 17248 struct intel_plane_state *plane_state = 17249 to_intel_plane_state(plane->base.state); 17250 struct intel_crtc_state *crtc_state; 17251 enum pipe pipe = PIPE_A; 17252 bool visible; 17253 17254 visible = plane->get_hw_state(plane, &pipe); 17255 17256 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17257 crtc_state = to_intel_crtc_state(crtc->base.state); 17258 17259 intel_set_plane_visible(crtc_state, plane_state, visible); 17260 17261 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n", 17262 plane->base.base.id, plane->base.name, 17263 enableddisabled(visible), pipe_name(pipe)); 17264 } 17265 17266 for_each_intel_crtc(&dev_priv->drm, crtc) { 17267 struct intel_crtc_state *crtc_state = 17268 to_intel_crtc_state(crtc->base.state); 17269 17270 fixup_active_planes(crtc_state); 17271 } 17272 } 17273 17274 static void intel_modeset_readout_hw_state(struct drm_device *dev) 17275 { 17276 struct drm_i915_private *dev_priv = to_i915(dev); 17277 enum pipe pipe; 17278 struct intel_crtc *crtc; 17279 struct intel_encoder *encoder; 17280 struct intel_connector *connector; 17281 struct drm_connector_list_iter conn_iter; 17282 int i; 17283 17284 dev_priv->active_pipes = 0; 17285 17286 for_each_intel_crtc(dev, crtc) { 17287 struct intel_crtc_state *crtc_state = 17288 to_intel_crtc_state(crtc->base.state); 17289 17290 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base); 17291 memset(crtc_state, 0, sizeof(*crtc_state)); 17292 __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base); 17293 17294 crtc_state->base.active = crtc_state->base.enable = 17295 dev_priv->display.get_pipe_config(crtc, crtc_state); 17296 17297 crtc->base.enabled = crtc_state->base.enable; 17298 crtc->active = crtc_state->base.active; 17299 17300 if (crtc_state->base.active) 17301 dev_priv->active_pipes |= BIT(crtc->pipe); 17302 17303 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", 17304 crtc->base.base.id, crtc->base.name, 17305 enableddisabled(crtc_state->base.active)); 17306 } 17307 17308 readout_plane_state(dev_priv); 17309 17310 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 17311 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 17312 17313 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll, 17314 &pll->state.hw_state); 17315 17316 if (IS_ELKHARTLAKE(dev_priv) && pll->on && 17317 pll->info->id == DPLL_ID_EHL_DPLL4) { 17318 pll->wakeref = intel_display_power_get(dev_priv, 17319 POWER_DOMAIN_DPLL_DC_OFF); 17320 } 17321 17322 pll->state.crtc_mask = 0; 17323 for_each_intel_crtc(dev, crtc) { 17324 struct intel_crtc_state *crtc_state = 17325 to_intel_crtc_state(crtc->base.state); 17326 17327 if (crtc_state->base.active && 17328 crtc_state->shared_dpll == pll) 17329 pll->state.crtc_mask |= 1 << crtc->pipe; 17330 } 17331 pll->active_mask = pll->state.crtc_mask; 17332 17333 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", 17334 pll->info->name, pll->state.crtc_mask, pll->on); 
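		/*
		 * A PLL that is still on here but ends up with an empty
		 * active_mask is disabled later in
		 * intel_modeset_setup_hw_state().
		 */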
17335 } 17336 17337 for_each_intel_encoder(dev, encoder) { 17338 pipe = 0; 17339 17340 if (encoder->get_hw_state(encoder, &pipe)) { 17341 struct intel_crtc_state *crtc_state; 17342 17343 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17344 crtc_state = to_intel_crtc_state(crtc->base.state); 17345 17346 encoder->base.crtc = &crtc->base; 17347 encoder->get_config(encoder, crtc_state); 17348 } else { 17349 encoder->base.crtc = NULL; 17350 } 17351 17352 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 17353 encoder->base.base.id, encoder->base.name, 17354 enableddisabled(encoder->base.crtc), 17355 pipe_name(pipe)); 17356 } 17357 17358 drm_connector_list_iter_begin(dev, &conn_iter); 17359 for_each_intel_connector_iter(connector, &conn_iter) { 17360 if (connector->get_hw_state(connector)) { 17361 struct intel_crtc_state *crtc_state; 17362 struct intel_crtc *crtc; 17363 17364 connector->base.dpms = DRM_MODE_DPMS_ON; 17365 17366 encoder = connector->encoder; 17367 connector->base.encoder = &encoder->base; 17368 17369 crtc = to_intel_crtc(encoder->base.crtc); 17370 crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL; 17371 17372 if (crtc_state && crtc_state->base.active) { 17373 /* 17374 * This has to be done during hardware readout 17375 * because anything calling .crtc_disable may 17376 * rely on the connector_mask being accurate. 17377 */ 17378 crtc_state->base.connector_mask |= 17379 drm_connector_mask(&connector->base); 17380 crtc_state->base.encoder_mask |= 17381 drm_encoder_mask(&encoder->base); 17382 } 17383 } else { 17384 connector->base.dpms = DRM_MODE_DPMS_OFF; 17385 connector->base.encoder = NULL; 17386 } 17387 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 17388 connector->base.base.id, connector->base.name, 17389 enableddisabled(connector->base.encoder)); 17390 } 17391 drm_connector_list_iter_end(&conn_iter); 17392 17393 for_each_intel_crtc(dev, crtc) { 17394 struct intel_bw_state *bw_state = 17395 to_intel_bw_state(dev_priv->bw_obj.state); 17396 struct intel_crtc_state *crtc_state = 17397 to_intel_crtc_state(crtc->base.state); 17398 struct intel_plane *plane; 17399 int min_cdclk = 0; 17400 17401 if (crtc_state->base.active) { 17402 struct drm_display_mode mode; 17403 17404 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, 17405 crtc_state); 17406 17407 mode = crtc_state->base.adjusted_mode; 17408 mode.hdisplay = crtc_state->pipe_src_w; 17409 mode.vdisplay = crtc_state->pipe_src_h; 17410 WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->base, &mode)); 17411 17412 /* 17413 * The initial mode needs to be set in order to keep 17414 * the atomic core happy. It wants a valid mode if the 17415 * crtc's enabled, so we do the above call. 17416 * 17417 * But we don't set all the derived state fully, hence 17418 * set a flag to indicate that a full recalculation is 17419 * needed on the next commit. 
17420 */ 17421 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED; 17422 17423 intel_crtc_compute_pixel_rate(crtc_state); 17424 17425 intel_crtc_update_active_timings(crtc_state); 17426 } 17427 17428 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 17429 const struct intel_plane_state *plane_state = 17430 to_intel_plane_state(plane->base.state); 17431 17432 /* 17433 * FIXME don't have the fb yet, so can't 17434 * use intel_plane_data_rate() :( 17435 */ 17436 if (plane_state->base.visible) 17437 crtc_state->data_rate[plane->id] = 17438 4 * crtc_state->pixel_rate; 17439 /* 17440 * FIXME don't have the fb yet, so can't 17441 * use plane->min_cdclk() :( 17442 */ 17443 if (plane_state->base.visible && plane->min_cdclk) { 17444 if (crtc_state->double_wide || 17445 INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 17446 crtc_state->min_cdclk[plane->id] = 17447 DIV_ROUND_UP(crtc_state->pixel_rate, 2); 17448 else 17449 crtc_state->min_cdclk[plane->id] = 17450 crtc_state->pixel_rate; 17451 } 17452 DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n", 17453 plane->base.base.id, plane->base.name, 17454 crtc_state->min_cdclk[plane->id]); 17455 } 17456 17457 if (crtc_state->base.active) { 17458 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); 17459 if (WARN_ON(min_cdclk < 0)) 17460 min_cdclk = 0; 17461 } 17462 17463 dev_priv->min_cdclk[crtc->pipe] = min_cdclk; 17464 dev_priv->min_voltage_level[crtc->pipe] = 17465 crtc_state->min_voltage_level; 17466 17467 intel_bw_crtc_update(bw_state, crtc_state); 17468 17469 intel_pipe_config_sanity_check(dev_priv, crtc_state); 17470 } 17471 } 17472 17473 static void 17474 get_encoder_power_domains(struct drm_i915_private *dev_priv) 17475 { 17476 struct intel_encoder *encoder; 17477 17478 for_each_intel_encoder(&dev_priv->drm, encoder) { 17479 struct intel_crtc_state *crtc_state; 17480 17481 if (!encoder->get_power_domains) 17482 continue; 17483 17484 /* 17485 * MST-primary and inactive encoders don't have a crtc state 17486 * and neither of these require any power domain references. 17487 */ 17488 if (!encoder->base.crtc) 17489 continue; 17490 17491 crtc_state = to_intel_crtc_state(encoder->base.crtc->state); 17492 encoder->get_power_domains(encoder, crtc_state); 17493 } 17494 } 17495 17496 static void intel_early_display_was(struct drm_i915_private *dev_priv) 17497 { 17498 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */ 17499 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) 17500 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | 17501 DARBF_GATING_DIS); 17502 17503 if (IS_HASWELL(dev_priv)) { 17504 /* 17505 * WaRsPkgCStateDisplayPMReq:hsw 17506 * System hang if this isn't done before disabling all planes! 
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}

static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
				       enum port port, i915_reg_t hdmi_reg)
{
	u32 val = I915_READ(hdmi_reg);

	if (val & SDVO_ENABLE ||
	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
		      port_name(port));

	val &= ~SDVO_PIPE_SEL_MASK;
	val |= SDVO_PIPE_SEL(PIPE_A);

	I915_WRITE(hdmi_reg, val);
}

static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
				     enum port port, i915_reg_t dp_reg)
{
	u32 val = I915_READ(dp_reg);

	if (val & DP_PORT_EN ||
	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
		      port_name(port));

	val &= ~DP_PIPE_SEL_MASK;
	val |= DP_PIPE_SEL(PIPE_A);

	I915_WRITE(dp_reg, val);
}

static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}

/*
 * Scan out the current hw modeset state and sanitize it to a
 * consistent state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
17611 */ 17612 for_each_intel_crtc(&dev_priv->drm, crtc) { 17613 crtc_state = to_intel_crtc_state(crtc->base.state); 17614 17615 drm_crtc_vblank_reset(&crtc->base); 17616 17617 if (crtc_state->base.active) 17618 intel_crtc_vblank_on(crtc_state); 17619 } 17620 17621 intel_sanitize_plane_mapping(dev_priv); 17622 17623 for_each_intel_encoder(dev, encoder) 17624 intel_sanitize_encoder(encoder); 17625 17626 for_each_intel_crtc(&dev_priv->drm, crtc) { 17627 crtc_state = to_intel_crtc_state(crtc->base.state); 17628 intel_sanitize_crtc(crtc, ctx); 17629 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]"); 17630 } 17631 17632 intel_modeset_update_connector_atomic_state(dev); 17633 17634 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 17635 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 17636 17637 if (!pll->on || pll->active_mask) 17638 continue; 17639 17640 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", 17641 pll->info->name); 17642 17643 pll->info->funcs->disable(dev_priv, pll); 17644 pll->on = false; 17645 } 17646 17647 if (IS_G4X(dev_priv)) { 17648 g4x_wm_get_hw_state(dev_priv); 17649 g4x_wm_sanitize(dev_priv); 17650 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 17651 vlv_wm_get_hw_state(dev_priv); 17652 vlv_wm_sanitize(dev_priv); 17653 } else if (INTEL_GEN(dev_priv) >= 9) { 17654 skl_wm_get_hw_state(dev_priv); 17655 } else if (HAS_PCH_SPLIT(dev_priv)) { 17656 ilk_wm_get_hw_state(dev_priv); 17657 } 17658 17659 for_each_intel_crtc(dev, crtc) { 17660 u64 put_domains; 17661 17662 crtc_state = to_intel_crtc_state(crtc->base.state); 17663 put_domains = modeset_get_crtc_power_domains(crtc_state); 17664 if (WARN_ON(put_domains)) 17665 modeset_put_power_domains(dev_priv, put_domains); 17666 } 17667 17668 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); 17669 17670 intel_fbc_init_pipe_state(dev_priv); 17671 } 17672 17673 void intel_display_resume(struct drm_device *dev) 17674 { 17675 struct drm_i915_private *dev_priv = to_i915(dev); 17676 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 17677 struct drm_modeset_acquire_ctx ctx; 17678 int ret; 17679 17680 dev_priv->modeset_restore_state = NULL; 17681 if (state) 17682 state->acquire_ctx = &ctx; 17683 17684 drm_modeset_acquire_init(&ctx, 0); 17685 17686 while (1) { 17687 ret = drm_modeset_lock_all_ctx(dev, &ctx); 17688 if (ret != -EDEADLK) 17689 break; 17690 17691 drm_modeset_backoff(&ctx); 17692 } 17693 17694 if (!ret) 17695 ret = __intel_display_resume(dev, state, &ctx); 17696 17697 intel_enable_ipc(dev_priv); 17698 drm_modeset_drop_locks(&ctx); 17699 drm_modeset_acquire_fini(&ctx); 17700 17701 if (ret) 17702 DRM_ERROR("Restoring old state failed with %i\n", ret); 17703 if (state) 17704 drm_atomic_state_put(state); 17705 } 17706 17707 static void intel_hpd_poll_fini(struct drm_i915_private *i915) 17708 { 17709 struct intel_connector *connector; 17710 struct drm_connector_list_iter conn_iter; 17711 17712 /* Kill all the work that may have been queued by hpd. 
	 */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));

	/*
	 * Shut down interrupts and polling as the first thing to avoid
	 * creating havoc. Too much stuff here (turning off connectors, ...)
	 * would experience fancy races otherwise.
	 */
	intel_irq_uninstall(i915);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	drm_mode_config_cleanup(&i915->drm);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

struct intel_display_error_state {

	u32 power_well_driver;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};

struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
__intel_display_power_is_enabled(dev_priv, 17843 POWER_DOMAIN_PIPE(i)); 17844 if (!error->pipe[i].power_domain_on) 17845 continue; 17846 17847 error->cursor[i].control = I915_READ(CURCNTR(i)); 17848 error->cursor[i].position = I915_READ(CURPOS(i)); 17849 error->cursor[i].base = I915_READ(CURBASE(i)); 17850 17851 error->plane[i].control = I915_READ(DSPCNTR(i)); 17852 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 17853 if (INTEL_GEN(dev_priv) <= 3) { 17854 error->plane[i].size = I915_READ(DSPSIZE(i)); 17855 error->plane[i].pos = I915_READ(DSPPOS(i)); 17856 } 17857 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 17858 error->plane[i].addr = I915_READ(DSPADDR(i)); 17859 if (INTEL_GEN(dev_priv) >= 4) { 17860 error->plane[i].surface = I915_READ(DSPSURF(i)); 17861 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 17862 } 17863 17864 error->pipe[i].source = I915_READ(PIPESRC(i)); 17865 17866 if (HAS_GMCH(dev_priv)) 17867 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 17868 } 17869 17870 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { 17871 enum transcoder cpu_transcoder = transcoders[i]; 17872 17873 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder]) 17874 continue; 17875 17876 error->transcoder[i].available = true; 17877 error->transcoder[i].power_domain_on = 17878 __intel_display_power_is_enabled(dev_priv, 17879 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 17880 if (!error->transcoder[i].power_domain_on) 17881 continue; 17882 17883 error->transcoder[i].cpu_transcoder = cpu_transcoder; 17884 17885 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 17886 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 17887 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 17888 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 17889 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 17890 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 17891 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 17892 } 17893 17894 return error; 17895 } 17896 17897 #define err_printf(e, ...) 
i915_error_printf(e, __VA_ARGS__) 17898 17899 void 17900 intel_display_print_error_state(struct drm_i915_error_state_buf *m, 17901 struct intel_display_error_state *error) 17902 { 17903 struct drm_i915_private *dev_priv = m->i915; 17904 int i; 17905 17906 if (!error) 17907 return; 17908 17909 err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv)); 17910 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 17911 err_printf(m, "PWR_WELL_CTL2: %08x\n", 17912 error->power_well_driver); 17913 for_each_pipe(dev_priv, i) { 17914 err_printf(m, "Pipe [%d]:\n", i); 17915 err_printf(m, " Power: %s\n", 17916 onoff(error->pipe[i].power_domain_on)); 17917 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 17918 err_printf(m, " STAT: %08x\n", error->pipe[i].stat); 17919 17920 err_printf(m, "Plane [%d]:\n", i); 17921 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 17922 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 17923 if (INTEL_GEN(dev_priv) <= 3) { 17924 err_printf(m, " SIZE: %08x\n", error->plane[i].size); 17925 err_printf(m, " POS: %08x\n", error->plane[i].pos); 17926 } 17927 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 17928 err_printf(m, " ADDR: %08x\n", error->plane[i].addr); 17929 if (INTEL_GEN(dev_priv) >= 4) { 17930 err_printf(m, " SURF: %08x\n", error->plane[i].surface); 17931 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 17932 } 17933 17934 err_printf(m, "Cursor [%d]:\n", i); 17935 err_printf(m, " CNTR: %08x\n", error->cursor[i].control); 17936 err_printf(m, " POS: %08x\n", error->cursor[i].position); 17937 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 17938 } 17939 17940 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { 17941 if (!error->transcoder[i].available) 17942 continue; 17943 17944 err_printf(m, "CPU transcoder: %s\n", 17945 transcoder_name(error->transcoder[i].cpu_transcoder)); 17946 err_printf(m, " Power: %s\n", 17947 onoff(error->transcoder[i].power_domain_on)); 17948 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 17949 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 17950 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); 17951 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); 17952 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); 17953 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); 17954 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 17955 } 17956 } 17957 17958 #endif 17959
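
/*
 * Example (illustrative only, nothing in the driver calls this): the
 * modeset locking pattern used by sanitize_watermarks(),
 * intel_initial_commit() and intel_display_resume() above. All modeset
 * locks are taken under an acquire context, -EDEADLK triggers a backoff
 * and retry, and the locks are dropped and the context finished on every
 * exit path. The function name is hypothetical.
 */
static int __maybe_unused example_modeset_locked_op(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		/* Another thread holds one of our locks: unwind and retry */
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	if (!ret) {
		/* ... operate on dev->mode_config under the locks ... */
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}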