/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"

#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_csr.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};
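/*
 * Note: each format-modifier list below is terminated by
 * DRM_FORMAT_MOD_INVALID, as drm_universal_plane_init() expects.
 */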
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		    CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}
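/*
 * The CCK divider field encodes (2 * ref / rate - 1), so the readback
 * below computes rate = 2 * ref_freq / (divider + 1), rounded. E.g.
 * (illustrative numbers) a divider field of 7 with a 1600000 kHz HPLL
 * VCO yields 2 * 1600000 / 8 = 400000 kHz.
 */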
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

/* units of kHz */
static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
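/*
 * E.g. .n.min = 1 in the tables below thus corresponds to an actual
 * N divider of 3 (an illustrative reading, not an extra constraint).
 */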
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};
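/*
 * Note the *5 in the vlv/chv dot limits above: the PLL runs at 5x the
 * pixel rate (the "fast clock"), and the calc helpers divide the result
 * back down by 5. Also note that on chv/bxt m2 carries 22 fractional
 * bits (hence the << 22), matching chv_calc_dpll_params() below which
 * divides by n << 22.
 */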
static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
	       is_trans_port_sync_slave(crtc_state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
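/*
 * Worked example for i9xx_calc_dpll_params() (illustrative numbers):
 * refclk = 96000 kHz, n = 2, m1 = 12, m2 = 5, p1 = 2, p2 = 10
 *   m   = 5 * (12 + 2) + (5 + 2) = 77
 *   vco = 96000 * 77 / (2 + 2)   = 1848000 kHz
 *   dot = 1848000 / (2 * 10)     = 92400 kHz
 */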
685 */ 686 if (intel_is_dual_link_lvds(dev_priv)) 687 return limit->p2.p2_fast; 688 else 689 return limit->p2.p2_slow; 690 } else { 691 if (target < limit->p2.dot_limit) 692 return limit->p2.p2_slow; 693 else 694 return limit->p2.p2_fast; 695 } 696 } 697 698 /* 699 * Returns a set of divisors for the desired target clock with the given 700 * refclk, or FALSE. The returned values represent the clock equation: 701 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 702 * 703 * Target and reference clocks are specified in kHz. 704 * 705 * If match_clock is provided, then best_clock P divider must match the P 706 * divider from @match_clock used for LVDS downclocking. 707 */ 708 static bool 709 i9xx_find_best_dpll(const struct intel_limit *limit, 710 struct intel_crtc_state *crtc_state, 711 int target, int refclk, struct dpll *match_clock, 712 struct dpll *best_clock) 713 { 714 struct drm_device *dev = crtc_state->uapi.crtc->dev; 715 struct dpll clock; 716 int err = target; 717 718 memset(best_clock, 0, sizeof(*best_clock)); 719 720 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 721 722 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 723 clock.m1++) { 724 for (clock.m2 = limit->m2.min; 725 clock.m2 <= limit->m2.max; clock.m2++) { 726 if (clock.m2 >= clock.m1) 727 break; 728 for (clock.n = limit->n.min; 729 clock.n <= limit->n.max; clock.n++) { 730 for (clock.p1 = limit->p1.min; 731 clock.p1 <= limit->p1.max; clock.p1++) { 732 int this_err; 733 734 i9xx_calc_dpll_params(refclk, &clock); 735 if (!intel_pll_is_valid(to_i915(dev), 736 limit, 737 &clock)) 738 continue; 739 if (match_clock && 740 clock.p != match_clock->p) 741 continue; 742 743 this_err = abs(clock.dot - target); 744 if (this_err < err) { 745 *best_clock = clock; 746 err = this_err; 747 } 748 } 749 } 750 } 751 } 752 753 return (err != target); 754 } 755 756 /* 757 * Returns a set of divisors for the desired target clock with the given 758 * refclk, or FALSE. The returned values represent the clock equation: 759 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 760 * 761 * Target and reference clocks are specified in kHz. 762 * 763 * If match_clock is provided, then best_clock P divider must match the P 764 * divider from @match_clock used for LVDS downclocking. 765 */ 766 static bool 767 pnv_find_best_dpll(const struct intel_limit *limit, 768 struct intel_crtc_state *crtc_state, 769 int target, int refclk, struct dpll *match_clock, 770 struct dpll *best_clock) 771 { 772 struct drm_device *dev = crtc_state->uapi.crtc->dev; 773 struct dpll clock; 774 int err = target; 775 776 memset(best_clock, 0, sizeof(*best_clock)); 777 778 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 779 780 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 781 clock.m1++) { 782 for (clock.m2 = limit->m2.min; 783 clock.m2 <= limit->m2.max; clock.m2++) { 784 for (clock.n = limit->n.min; 785 clock.n <= limit->n.max; clock.n++) { 786 for (clock.p1 = limit->p1.min; 787 clock.p1 <= limit->p1.max; clock.p1++) { 788 int this_err; 789 790 pnv_calc_dpll_params(refclk, &clock); 791 if (!intel_pll_is_valid(to_i915(dev), 792 limit, 793 &clock)) 794 continue; 795 if (match_clock && 796 clock.p != match_clock->p) 797 continue; 798 799 this_err = abs(clock.dot - target); 800 if (this_err < err) { 801 *best_clock = clock; 802 err = this_err; 803 } 804 } 805 } 806 } 807 } 808 809 return (err != target); 810 } 811 812 /* 813 * Returns a set of divisors for the desired target clock with the given 814 * refclk, or FALSE. 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (m2 + 2) / n / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check whether the calculated PLL configuration is better than the best
 * configuration and error found so far.  The calculated error is returned
 * in @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (drm_WARN_ON_ONCE(dev, !target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
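/*
 * Example for the ppm computation above (illustrative numbers): with
 * target_freq = 270000 kHz and dot = 270027 kHz the error is
 * 27 * 1000000 / 270000 = 100 ppm.
 */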
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * m1 * m2 / n / p1 / p2 (the 5x "fast clock"; the pixel clock
 * is this divided by 5).
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * m1 * m2 / n / p1 / p2, where m2 carries 22 fractional bits
 * (the 5x "fast clock"; the pixel clock is this divided by 5).
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 is always
	 * set to 2.  If we ever need to support a 200 MHz refclk, we'll
	 * need to revisit this because n may no longer be 1.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5; /* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			if (m2 > INT_MAX / clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}
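/*
 * BXT reuses the CHV PLL computation: the reference is the fixed 100 MHz
 * (100000 kHz) refclk and the target is the port clock.
 */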
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = intel_de_read(dev_priv, reg) & line_mask;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	intel_de_write(dev_priv, DPLL_MD(pipe),
		       pipe_config->dpll_hw_state.dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(&dev_priv->drm,
			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(dev_priv, DPLL_MD(pipe),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, reg, dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
			       crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(dev_priv, reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, reg, dpll);
		intel_de_posting_read(dev_priv, reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}

static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
*/ 1793 val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); 1794 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1795 intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); 1796 } 1797 1798 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) 1799 { 1800 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1801 1802 if (HAS_PCH_LPT(dev_priv)) 1803 return PIPE_A; 1804 else 1805 return crtc->pipe; 1806 } 1807 1808 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state) 1809 { 1810 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1811 1812 /* 1813 * On i965gm the hardware frame counter reads 1814 * zero when the TV encoder is enabled :( 1815 */ 1816 if (IS_I965GM(dev_priv) && 1817 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT))) 1818 return 0; 1819 1820 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1821 return 0xffffffff; /* full 32 bit counter */ 1822 else if (INTEL_GEN(dev_priv) >= 3) 1823 return 0xffffff; /* only 24 bits of frame count */ 1824 else 1825 return 0; /* Gen2 doesn't have a hardware frame counter */ 1826 } 1827 1828 void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state) 1829 { 1830 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1831 1832 assert_vblank_disabled(&crtc->base); 1833 drm_crtc_set_max_vblank_count(&crtc->base, 1834 intel_crtc_max_vblank_count(crtc_state)); 1835 drm_crtc_vblank_on(&crtc->base); 1836 } 1837 1838 void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state) 1839 { 1840 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1841 1842 drm_crtc_vblank_off(&crtc->base); 1843 assert_vblank_disabled(&crtc->base); 1844 } 1845 1846 void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) 1847 { 1848 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 1849 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1850 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 1851 enum pipe pipe = crtc->pipe; 1852 i915_reg_t reg; 1853 u32 val; 1854 1855 drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe)); 1856 1857 assert_planes_disabled(crtc); 1858 1859 /* 1860 * A pipe without a PLL won't actually be able to drive bits from 1861 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 1862 * need the check. 1863 */ 1864 if (HAS_GMCH(dev_priv)) { 1865 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI)) 1866 assert_dsi_pll_enabled(dev_priv); 1867 else 1868 assert_pll_enabled(dev_priv, pipe); 1869 } else { 1870 if (new_crtc_state->has_pch_encoder) { 1871 /* if driving the PCH, we need FDI enabled */ 1872 assert_fdi_rx_pll_enabled(dev_priv, 1873 intel_crtc_pch_transcoder(crtc)); 1874 assert_fdi_tx_pll_enabled(dev_priv, 1875 (enum pipe) cpu_transcoder); 1876 } 1877 /* FIXME: assert CPU port conditions for SNB+ */ 1878 } 1879 1880 trace_intel_pipe_enable(crtc); 1881 1882 reg = PIPECONF(cpu_transcoder); 1883 val = intel_de_read(dev_priv, reg); 1884 if (val & PIPECONF_ENABLE) { 1885 /* we keep both pipes enabled on 830 */ 1886 drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv)); 1887 return; 1888 } 1889 1890 intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE); 1891 intel_de_posting_read(dev_priv, reg); 1892 1893 /* 1894 * Until the pipe starts PIPEDSL reads will return a stale value, 1895 * which causes an apparent vblank timestamp jump when PIPEDSL 1896 * resets to its proper value. That also messes up the frame count 1897 * when it's derived from the timestamps. 
So let's wait for the 1898 * pipe to start properly before we call drm_crtc_vblank_on() 1899 */ 1900 if (intel_crtc_max_vblank_count(new_crtc_state) == 0) 1901 intel_wait_for_pipe_scanline_moving(crtc); 1902 } 1903 1904 void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) 1905 { 1906 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 1907 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1908 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 1909 enum pipe pipe = crtc->pipe; 1910 i915_reg_t reg; 1911 u32 val; 1912 1913 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe)); 1914 1915 /* 1916 * Make sure planes won't keep trying to pump pixels to us, 1917 * or we might hang the display. 1918 */ 1919 assert_planes_disabled(crtc); 1920 1921 trace_intel_pipe_disable(crtc); 1922 1923 reg = PIPECONF(cpu_transcoder); 1924 val = intel_de_read(dev_priv, reg); 1925 if ((val & PIPECONF_ENABLE) == 0) 1926 return; 1927 1928 /* 1929 * Double wide has implications for planes 1930 * so best keep it disabled when not needed. 1931 */ 1932 if (old_crtc_state->double_wide) 1933 val &= ~PIPECONF_DOUBLE_WIDE; 1934 1935 /* Don't disable pipe or pipe PLLs if needed */ 1936 if (!IS_I830(dev_priv)) 1937 val &= ~PIPECONF_ENABLE; 1938 1939 intel_de_write(dev_priv, reg, val); 1940 if ((val & PIPECONF_ENABLE) == 0) 1941 intel_wait_for_pipe_off(old_crtc_state); 1942 } 1943 1944 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 1945 { 1946 return IS_GEN(dev_priv, 2) ? 2048 : 4096; 1947 } 1948 1949 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane) 1950 { 1951 if (!is_ccs_modifier(fb->modifier)) 1952 return false; 1953 1954 return plane >= fb->format->num_planes / 2; 1955 } 1956 1957 static bool is_gen12_ccs_modifier(u64 modifier) 1958 { 1959 return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || 1960 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; 1961 1962 } 1963 1964 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane) 1965 { 1966 return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane); 1967 } 1968 1969 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane) 1970 { 1971 if (is_ccs_modifier(fb->modifier)) 1972 return is_ccs_plane(fb, plane); 1973 1974 return plane == 1; 1975 } 1976 1977 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) 1978 { 1979 drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || 1980 (main_plane && main_plane >= fb->format->num_planes / 2)); 1981 1982 return fb->format->num_planes / 2 + main_plane; 1983 } 1984 1985 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) 1986 { 1987 drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || 1988 ccs_plane < fb->format->num_planes / 2); 1989 1990 return ccs_plane - fb->format->num_planes / 2; 1991 } 1992 1993 /* Return either the main plane's CCS or - if not a CCS FB - UV plane */ 1994 int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) 1995 { 1996 if (is_ccs_modifier(fb->modifier)) 1997 return main_to_ccs_plane(fb, main_plane); 1998 1999 return 1; 2000 } 2001 2002 bool 2003 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, 2004 uint64_t modifier) 2005 { 2006 return info->is_yuv && 2007 info->num_planes == (is_ccs_modifier(modifier) ? 
4 : 2); 2008 } 2009 2010 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, 2011 int color_plane) 2012 { 2013 return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && 2014 color_plane == 1; 2015 } 2016 2017 static unsigned int 2018 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) 2019 { 2020 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2021 unsigned int cpp = fb->format->cpp[color_plane]; 2022 2023 switch (fb->modifier) { 2024 case DRM_FORMAT_MOD_LINEAR: 2025 return intel_tile_size(dev_priv); 2026 case I915_FORMAT_MOD_X_TILED: 2027 if (IS_GEN(dev_priv, 2)) 2028 return 128; 2029 else 2030 return 512; 2031 case I915_FORMAT_MOD_Y_TILED_CCS: 2032 if (is_ccs_plane(fb, color_plane)) 2033 return 128; 2034 fallthrough; 2035 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 2036 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 2037 if (is_ccs_plane(fb, color_plane)) 2038 return 64; 2039 fallthrough; 2040 case I915_FORMAT_MOD_Y_TILED: 2041 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv)) 2042 return 128; 2043 else 2044 return 512; 2045 case I915_FORMAT_MOD_Yf_TILED_CCS: 2046 if (is_ccs_plane(fb, color_plane)) 2047 return 128; 2048 fallthrough; 2049 case I915_FORMAT_MOD_Yf_TILED: 2050 switch (cpp) { 2051 case 1: 2052 return 64; 2053 case 2: 2054 case 4: 2055 return 128; 2056 case 8: 2057 case 16: 2058 return 256; 2059 default: 2060 MISSING_CASE(cpp); 2061 return cpp; 2062 } 2063 break; 2064 default: 2065 MISSING_CASE(fb->modifier); 2066 return cpp; 2067 } 2068 } 2069 2070 static unsigned int 2071 intel_tile_height(const struct drm_framebuffer *fb, int color_plane) 2072 { 2073 if (is_gen12_ccs_plane(fb, color_plane)) 2074 return 1; 2075 2076 return intel_tile_size(to_i915(fb->dev)) / 2077 intel_tile_width_bytes(fb, color_plane); 2078 } 2079 2080 /* Return the tile dimensions in pixel units */ 2081 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane, 2082 unsigned int *tile_width, 2083 unsigned int *tile_height) 2084 { 2085 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane); 2086 unsigned int cpp = fb->format->cpp[color_plane]; 2087 2088 *tile_width = tile_width_bytes / cpp; 2089 *tile_height = intel_tile_height(fb, color_plane); 2090 } 2091 2092 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, 2093 int color_plane) 2094 { 2095 unsigned int tile_width, tile_height; 2096 2097 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2098 2099 return fb->pitches[color_plane] * tile_height; 2100 } 2101 2102 unsigned int 2103 intel_fb_align_height(const struct drm_framebuffer *fb, 2104 int color_plane, unsigned int height) 2105 { 2106 unsigned int tile_height = intel_tile_height(fb, color_plane); 2107 2108 return ALIGN(height, tile_height); 2109 } 2110 2111 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 2112 { 2113 unsigned int size = 0; 2114 int i; 2115 2116 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) 2117 size += rot_info->plane[i].width * rot_info->plane[i].height; 2118 2119 return size; 2120 } 2121 2122 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info) 2123 { 2124 unsigned int size = 0; 2125 int i; 2126 2127 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) 2128 size += rem_info->plane[i].width * rem_info->plane[i].height; 2129 2130 return size; 2131 } 2132 2133 static void 2134 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, 2135 const struct drm_framebuffer *fb, 2136 unsigned int 
rotation) 2137 { 2138 view->type = I915_GGTT_VIEW_NORMAL; 2139 if (drm_rotation_90_or_270(rotation)) { 2140 view->type = I915_GGTT_VIEW_ROTATED; 2141 view->rotated = to_intel_framebuffer(fb)->rot_info; 2142 } 2143 } 2144 2145 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv) 2146 { 2147 if (IS_I830(dev_priv)) 2148 return 16 * 1024; 2149 else if (IS_I85X(dev_priv)) 2150 return 256; 2151 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 2152 return 32; 2153 else 2154 return 4 * 1024; 2155 } 2156 2157 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) 2158 { 2159 if (INTEL_GEN(dev_priv) >= 9) 2160 return 256 * 1024; 2161 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) || 2162 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2163 return 128 * 1024; 2164 else if (INTEL_GEN(dev_priv) >= 4) 2165 return 4 * 1024; 2166 else 2167 return 0; 2168 } 2169 2170 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, 2171 int color_plane) 2172 { 2173 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2174 2175 /* AUX_DIST needs only 4K alignment */ 2176 if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) || 2177 is_ccs_plane(fb, color_plane)) 2178 return 4096; 2179 2180 switch (fb->modifier) { 2181 case DRM_FORMAT_MOD_LINEAR: 2182 return intel_linear_alignment(dev_priv); 2183 case I915_FORMAT_MOD_X_TILED: 2184 if (INTEL_GEN(dev_priv) >= 9) 2185 return 256 * 1024; 2186 return 0; 2187 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 2188 if (is_semiplanar_uv_plane(fb, color_plane)) 2189 return intel_tile_row_size(fb, color_plane); 2190 fallthrough; 2191 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 2192 return 16 * 1024; 2193 case I915_FORMAT_MOD_Y_TILED_CCS: 2194 case I915_FORMAT_MOD_Yf_TILED_CCS: 2195 case I915_FORMAT_MOD_Y_TILED: 2196 if (INTEL_GEN(dev_priv) >= 12 && 2197 is_semiplanar_uv_plane(fb, color_plane)) 2198 return intel_tile_row_size(fb, color_plane); 2199 fallthrough; 2200 case I915_FORMAT_MOD_Yf_TILED: 2201 return 1 * 1024 * 1024; 2202 default: 2203 MISSING_CASE(fb->modifier); 2204 return 0; 2205 } 2206 } 2207 2208 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) 2209 { 2210 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 2211 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2212 2213 return INTEL_GEN(dev_priv) < 4 || 2214 (plane->has_fbc && 2215 plane_state->view.type == I915_GGTT_VIEW_NORMAL); 2216 } 2217 2218 struct i915_vma * 2219 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 2220 const struct i915_ggtt_view *view, 2221 bool uses_fence, 2222 unsigned long *out_flags) 2223 { 2224 struct drm_device *dev = fb->dev; 2225 struct drm_i915_private *dev_priv = to_i915(dev); 2226 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2227 intel_wakeref_t wakeref; 2228 struct i915_vma *vma; 2229 unsigned int pinctl; 2230 u32 alignment; 2231 2232 if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj))) 2233 return ERR_PTR(-EINVAL); 2234 2235 alignment = intel_surf_alignment(fb, 0); 2236 if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment))) 2237 return ERR_PTR(-EINVAL); 2238 2239 /* Note that the w/a also requires 64 PTE of padding following the 2240 * bo. We currently fill all unused PTE with the shadow page and so 2241 * we should always have valid PTE following the scanout preventing 2242 * the VT-d warning. 
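 *
 * (A numeric cross-check, assuming 4KiB pages: 64 PTE * 4KiB = 256KiB,
 * which lines up with the 256KiB alignment the workaround bumps us to
 * below.)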
2243 */ 2244 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) 2245 alignment = 256 * 1024; 2246 2247 /* 2248 * Global gtt pte registers are special registers which actually forward 2249 * writes to a chunk of system memory, which means that there is no risk 2250 * that the register values disappear as soon as we call 2251 * intel_runtime_pm_put(), so it is correct to wrap only the 2252 * pin/unpin/fence and not more. 2253 */ 2254 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 2255 2256 atomic_inc(&dev_priv->gpu_error.pending_fb_pin); 2257 2258 /* 2259 * Valleyview is definitely limited to scanning out the first 2260 * 512MiB. Let's presume this behaviour was inherited from the 2261 * g4x display engine and that all earlier gen are similarly 2262 * limited. Testing suggests that it is a little more 2263 * complicated than this. For example, Cherryview appears quite 2264 * happy to scan out from anywhere within its global aperture. 2265 */ 2266 pinctl = 0; 2267 if (HAS_GMCH(dev_priv)) 2268 pinctl |= PIN_MAPPABLE; 2269 2270 vma = i915_gem_object_pin_to_display_plane(obj, 2271 alignment, view, pinctl); 2272 if (IS_ERR(vma)) 2273 goto err; 2274 2275 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) { 2276 int ret; 2277 2278 /* 2279 * Install a fence for tiled scan-out. Pre-i965 always needs a 2280 * fence, whereas 965+ only requires a fence if using 2281 * framebuffer compression. For simplicity, we always, when 2282 * possible, install a fence as the cost is not that onerous. 2283 * 2284 * If we fail to fence the tiled scanout, then either the 2285 * modeset will reject the change (which is highly unlikely as 2286 * the affected systems, all but one, do not have unmappable 2287 * space) or we will not be able to enable full powersaving 2288 * techniques (also likely not to apply due to various limits 2289 * FBC and the like impose on the size of the buffer, which 2290 * presumably we violated anyway with this unmappable buffer). 2291 * Anyway, it is presumably better to stumble onwards with 2292 * something and try to run the system in a "less than optimal" 2293 * mode that matches the user configuration. 2294 */ 2295 ret = i915_vma_pin_fence(vma); 2296 if (ret != 0 && INTEL_GEN(dev_priv) < 4) { 2297 i915_gem_object_unpin_from_display_plane(vma); 2298 vma = ERR_PTR(ret); 2299 goto err; 2300 } 2301 2302 if (ret == 0 && vma->fence) 2303 *out_flags |= PLANE_HAS_FENCE; 2304 } 2305 2306 i915_vma_get(vma); 2307 err: 2308 atomic_dec(&dev_priv->gpu_error.pending_fb_pin); 2309 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2310 return vma; 2311 } 2312 2313 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) 2314 { 2315 i915_gem_object_lock(vma->obj, NULL); 2316 if (flags & PLANE_HAS_FENCE) 2317 i915_vma_unpin_fence(vma); 2318 i915_gem_object_unpin_from_display_plane(vma); 2319 i915_gem_object_unlock(vma->obj); 2320 2321 i915_vma_put(vma); 2322 } 2323 2324 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane, 2325 unsigned int rotation) 2326 { 2327 if (drm_rotation_90_or_270(rotation)) 2328 return to_intel_framebuffer(fb)->rotated[color_plane].pitch; 2329 else 2330 return fb->pitches[color_plane]; 2331 } 2332 2333 /* 2334 * Convert the x/y offsets into a linear offset. 2335 * Only valid with 0/180 degree rotation, which is fine since linear 2336 * offset is only used with linear buffers on pre-hsw and tiled buffers 2337 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
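 *
 * A minimal illustration, with hypothetical numbers rather than values
 * taken from real plane state (XRGB8888, so cpp = 4, and a 5120 byte
 * stride):
 *
 *   intel_fb_xy_to_linear(16, 2, state, 0)
 *     = 2 * 5120 + 16 * 4 = 10304 bytes into the fb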
2338 */ 2339 u32 intel_fb_xy_to_linear(int x, int y, 2340 const struct intel_plane_state *state, 2341 int color_plane) 2342 { 2343 const struct drm_framebuffer *fb = state->hw.fb; 2344 unsigned int cpp = fb->format->cpp[color_plane]; 2345 unsigned int pitch = state->color_plane[color_plane].stride; 2346 2347 return y * pitch + x * cpp; 2348 } 2349 2350 /* 2351 * Add the x/y offsets derived from fb->offsets[] to the user 2352 * specified plane src x/y offsets. The resulting x/y offsets 2353 * specify the start of scanout from the beginning of the gtt mapping. 2354 */ 2355 void intel_add_fb_offsets(int *x, int *y, 2356 const struct intel_plane_state *state, 2357 int color_plane) 2358 2359 { 2360 *x += state->color_plane[color_plane].x; 2361 *y += state->color_plane[color_plane].y; 2362 } 2363 2364 static u32 intel_adjust_tile_offset(int *x, int *y, 2365 unsigned int tile_width, 2366 unsigned int tile_height, 2367 unsigned int tile_size, 2368 unsigned int pitch_tiles, 2369 u32 old_offset, 2370 u32 new_offset) 2371 { 2372 unsigned int pitch_pixels = pitch_tiles * tile_width; 2373 unsigned int tiles; 2374 2375 WARN_ON(old_offset & (tile_size - 1)); 2376 WARN_ON(new_offset & (tile_size - 1)); 2377 WARN_ON(new_offset > old_offset); 2378 2379 tiles = (old_offset - new_offset) / tile_size; 2380 2381 *y += tiles / pitch_tiles * tile_height; 2382 *x += tiles % pitch_tiles * tile_width; 2383 2384 /* minimize x in case it got needlessly big */ 2385 *y += *x / pitch_pixels * tile_height; 2386 *x %= pitch_pixels; 2387 2388 return new_offset; 2389 } 2390 2391 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane) 2392 { 2393 return fb->modifier == DRM_FORMAT_MOD_LINEAR || 2394 is_gen12_ccs_plane(fb, color_plane); 2395 } 2396 2397 static u32 intel_adjust_aligned_offset(int *x, int *y, 2398 const struct drm_framebuffer *fb, 2399 int color_plane, 2400 unsigned int rotation, 2401 unsigned int pitch, 2402 u32 old_offset, u32 new_offset) 2403 { 2404 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2405 unsigned int cpp = fb->format->cpp[color_plane]; 2406 2407 drm_WARN_ON(&dev_priv->drm, new_offset > old_offset); 2408 2409 if (!is_surface_linear(fb, color_plane)) { 2410 unsigned int tile_size, tile_width, tile_height; 2411 unsigned int pitch_tiles; 2412 2413 tile_size = intel_tile_size(dev_priv); 2414 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2415 2416 if (drm_rotation_90_or_270(rotation)) { 2417 pitch_tiles = pitch / tile_height; 2418 swap(tile_width, tile_height); 2419 } else { 2420 pitch_tiles = pitch / (tile_width * cpp); 2421 } 2422 2423 intel_adjust_tile_offset(x, y, tile_width, tile_height, 2424 tile_size, pitch_tiles, 2425 old_offset, new_offset); 2426 } else { 2427 old_offset += *y * pitch + *x * cpp; 2428 2429 *y = (old_offset - new_offset) / pitch; 2430 *x = ((old_offset - new_offset) - *y * pitch) / cpp; 2431 } 2432 2433 return new_offset; 2434 } 2435 2436 /* 2437 * Adjust the tile offset by moving the difference into 2438 * the x/y offsets. 2439 */ 2440 static u32 intel_plane_adjust_aligned_offset(int *x, int *y, 2441 const struct intel_plane_state *state, 2442 int color_plane, 2443 u32 old_offset, u32 new_offset) 2444 { 2445 return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane, 2446 state->hw.rotation, 2447 state->color_plane[color_plane].stride, 2448 old_offset, new_offset); 2449 } 2450 2451 /* 2452 * Computes the aligned offset to the base tile and adjusts 2453 * x, y. bytes per pixel is assumed to be a power-of-two. 
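 *
 * An illustrative trace with hypothetical numbers (unrotated, X-tiled on
 * gen4+: tile_size = 4096, tile_width = 512 / 4 = 128 px at cpp = 4,
 * tile_height = 8 rows, pitch = 2048 bytes so pitch_tiles = 4): x = 200,
 * y = 20 gives tile_rows = 2 and tiles = 1, hence a base tile offset of
 * (2 * 4 + 1) * 4096 = 36864 bytes, leaving x = 72, y = 4 as the
 * intra-tile offsets before any alignment rounding is folded back in.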
2454 * 2455 * In the 90/270 rotated case, x and y are assumed 2456 * to be already rotated to match the rotated GTT view, and 2457 * pitch is the tile_height aligned framebuffer height. 2458 * 2459 * This function is used when computing the derived information 2460 * under intel_framebuffer, so using any of that information 2461 * here is not allowed. Anything under drm_framebuffer can be 2462 * used. This is why the user has to pass in the pitch since it 2463 * is specified in the rotated orientation. 2464 */ 2465 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, 2466 int *x, int *y, 2467 const struct drm_framebuffer *fb, 2468 int color_plane, 2469 unsigned int pitch, 2470 unsigned int rotation, 2471 u32 alignment) 2472 { 2473 unsigned int cpp = fb->format->cpp[color_plane]; 2474 u32 offset, offset_aligned; 2475 2476 if (!is_surface_linear(fb, color_plane)) { 2477 unsigned int tile_size, tile_width, tile_height; 2478 unsigned int tile_rows, tiles, pitch_tiles; 2479 2480 tile_size = intel_tile_size(dev_priv); 2481 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2482 2483 if (drm_rotation_90_or_270(rotation)) { 2484 pitch_tiles = pitch / tile_height; 2485 swap(tile_width, tile_height); 2486 } else { 2487 pitch_tiles = pitch / (tile_width * cpp); 2488 } 2489 2490 tile_rows = *y / tile_height; 2491 *y %= tile_height; 2492 2493 tiles = *x / tile_width; 2494 *x %= tile_width; 2495 2496 offset = (tile_rows * pitch_tiles + tiles) * tile_size; 2497 2498 offset_aligned = offset; 2499 if (alignment) 2500 offset_aligned = rounddown(offset_aligned, alignment); 2501 2502 intel_adjust_tile_offset(x, y, tile_width, tile_height, 2503 tile_size, pitch_tiles, 2504 offset, offset_aligned); 2505 } else { 2506 offset = *y * pitch + *x * cpp; 2507 offset_aligned = offset; 2508 if (alignment) { 2509 offset_aligned = rounddown(offset_aligned, alignment); 2510 *y = (offset % alignment) / pitch; 2511 *x = ((offset % alignment) - *y * pitch) / cpp; 2512 } else { 2513 *y = *x = 0; 2514 } 2515 } 2516 2517 return offset_aligned; 2518 } 2519 2520 static u32 intel_plane_compute_aligned_offset(int *x, int *y, 2521 const struct intel_plane_state *state, 2522 int color_plane) 2523 { 2524 struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane); 2525 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 2526 const struct drm_framebuffer *fb = state->hw.fb; 2527 unsigned int rotation = state->hw.rotation; 2528 int pitch = state->color_plane[color_plane].stride; 2529 u32 alignment; 2530 2531 if (intel_plane->id == PLANE_CURSOR) 2532 alignment = intel_cursor_alignment(dev_priv); 2533 else 2534 alignment = intel_surf_alignment(fb, color_plane); 2535 2536 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane, 2537 pitch, rotation, alignment); 2538 } 2539 2540 /* Convert the fb->offset[] into x/y offsets */ 2541 static int intel_fb_offset_to_xy(int *x, int *y, 2542 const struct drm_framebuffer *fb, 2543 int color_plane) 2544 { 2545 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2546 unsigned int height; 2547 u32 alignment; 2548 2549 if (INTEL_GEN(dev_priv) >= 12 && 2550 is_semiplanar_uv_plane(fb, color_plane)) 2551 alignment = intel_tile_row_size(fb, color_plane); 2552 else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) 2553 alignment = intel_tile_size(dev_priv); 2554 else 2555 alignment = 0; 2556 2557 if (alignment != 0 && fb->offsets[color_plane] % alignment) { 2558 drm_dbg_kms(&dev_priv->drm, 2559 "Misaligned offset 0x%08x for color plane %d\n", 
2560 fb->offsets[color_plane], color_plane); 2561 return -EINVAL; 2562 } 2563 2564 height = drm_framebuffer_plane_height(fb->height, fb, color_plane); 2565 height = ALIGN(height, intel_tile_height(fb, color_plane)); 2566 2567 /* Catch potential overflows early */ 2568 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]), 2569 fb->offsets[color_plane])) { 2570 drm_dbg_kms(&dev_priv->drm, 2571 "Bad offset 0x%08x or pitch %d for color plane %d\n", 2572 fb->offsets[color_plane], fb->pitches[color_plane], 2573 color_plane); 2574 return -ERANGE; 2575 } 2576 2577 *x = 0; 2578 *y = 0; 2579 2580 intel_adjust_aligned_offset(x, y, 2581 fb, color_plane, DRM_MODE_ROTATE_0, 2582 fb->pitches[color_plane], 2583 fb->offsets[color_plane], 0); 2584 2585 return 0; 2586 } 2587 2588 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) 2589 { 2590 switch (fb_modifier) { 2591 case I915_FORMAT_MOD_X_TILED: 2592 return I915_TILING_X; 2593 case I915_FORMAT_MOD_Y_TILED: 2594 case I915_FORMAT_MOD_Y_TILED_CCS: 2595 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 2596 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 2597 return I915_TILING_Y; 2598 default: 2599 return I915_TILING_NONE; 2600 } 2601 } 2602 2603 /* 2604 * From the Sky Lake PRM: 2605 * "The Color Control Surface (CCS) contains the compression status of 2606 * the cache-line pairs. The compression state of the cache-line pair 2607 * is specified by 2 bits in the CCS. Each CCS cache-line represents 2608 * an area on the main surface of 16x16 sets of 128 byte Y-tiled 2609 * cache-line-pairs. CCS is always Y tiled." 2610 * 2611 * Since cache line pairs refer to horizontally adjacent cache lines, 2612 * each cache line in the CCS corresponds to an area of 32x16 cache 2613 * lines on the main surface. Since each pixel is 4 bytes, this gives 2614 * us a ratio of one byte in the CCS for each 8x16 pixels in the 2615 * main surface. 2616 */ 2617 static const struct drm_format_info skl_ccs_formats[] = { 2618 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 2619 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2620 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 2621 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2622 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 2623 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 2624 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 2625 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 2626 }; 2627 2628 /* 2629 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the 2630 * main surface, and each 64B CCS cache line represents an area of 4x1 Y-tiles 2631 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of 2632 * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in 2633 * the main surface.
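 *
 * (Cross-checking those numbers: a 64B CCS cache line is 512 bits, i.e.
 * 128 four-bit entries covering 128 cache line pairs = 128 * 128B = 16KiB
 * of main surface, which is indeed 4 Y-tiles of 4KiB each.)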
2634 */ 2635 static const struct drm_format_info gen12_ccs_formats[] = { 2636 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 2637 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2638 .hsub = 1, .vsub = 1, }, 2639 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 2640 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2641 .hsub = 1, .vsub = 1, }, 2642 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 2643 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2644 .hsub = 1, .vsub = 1, .has_alpha = true }, 2645 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 2646 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2647 .hsub = 1, .vsub = 1, .has_alpha = true }, 2648 { .format = DRM_FORMAT_YUYV, .num_planes = 2, 2649 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2650 .hsub = 2, .vsub = 1, .is_yuv = true }, 2651 { .format = DRM_FORMAT_YVYU, .num_planes = 2, 2652 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2653 .hsub = 2, .vsub = 1, .is_yuv = true }, 2654 { .format = DRM_FORMAT_UYVY, .num_planes = 2, 2655 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2656 .hsub = 2, .vsub = 1, .is_yuv = true }, 2657 { .format = DRM_FORMAT_VYUY, .num_planes = 2, 2658 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2659 .hsub = 2, .vsub = 1, .is_yuv = true }, 2660 { .format = DRM_FORMAT_NV12, .num_planes = 4, 2661 .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, 2662 .hsub = 2, .vsub = 2, .is_yuv = true }, 2663 { .format = DRM_FORMAT_P010, .num_planes = 4, 2664 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, 2665 .hsub = 2, .vsub = 2, .is_yuv = true }, 2666 { .format = DRM_FORMAT_P012, .num_planes = 4, 2667 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, 2668 .hsub = 2, .vsub = 2, .is_yuv = true }, 2669 { .format = DRM_FORMAT_P016, .num_planes = 4, 2670 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, 2671 .hsub = 2, .vsub = 2, .is_yuv = true }, 2672 }; 2673 2674 static const struct drm_format_info * 2675 lookup_format_info(const struct drm_format_info formats[], 2676 int num_formats, u32 format) 2677 { 2678 int i; 2679 2680 for (i = 0; i < num_formats; i++) { 2681 if (formats[i].format == format) 2682 return &formats[i]; 2683 } 2684 2685 return NULL; 2686 } 2687 2688 static const struct drm_format_info * 2689 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 2690 { 2691 switch (cmd->modifier[0]) { 2692 case I915_FORMAT_MOD_Y_TILED_CCS: 2693 case I915_FORMAT_MOD_Yf_TILED_CCS: 2694 return lookup_format_info(skl_ccs_formats, 2695 ARRAY_SIZE(skl_ccs_formats), 2696 cmd->pixel_format); 2697 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 2698 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 2699 return lookup_format_info(gen12_ccs_formats, 2700 ARRAY_SIZE(gen12_ccs_formats), 2701 cmd->pixel_format); 2702 default: 2703 return NULL; 2704 } 2705 } 2706 2707 bool is_ccs_modifier(u64 modifier) 2708 { 2709 return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || 2710 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || 2711 modifier == I915_FORMAT_MOD_Y_TILED_CCS || 2712 modifier == I915_FORMAT_MOD_Yf_TILED_CCS; 2713 } 2714 2715 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane) 2716 { 2717 return 
DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)], 2718 512) * 64; 2719 } 2720 2721 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, 2722 u32 pixel_format, u64 modifier) 2723 { 2724 struct intel_crtc *crtc; 2725 struct intel_plane *plane; 2726 2727 /* 2728 * We assume the primary plane for pipe A has 2729 * the highest stride limits of them all; 2730 * if pipe A happens to be disabled, use the first pipe from pipe_mask. 2731 */ 2732 crtc = intel_get_first_crtc(dev_priv); 2733 if (!crtc) 2734 return 0; 2735 2736 plane = to_intel_plane(crtc->base.primary); 2737 2738 return plane->max_stride(plane, pixel_format, modifier, 2739 DRM_MODE_ROTATE_0); 2740 } 2741 2742 static 2743 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv, 2744 u32 pixel_format, u64 modifier) 2745 { 2746 /* 2747 * Arbitrary limit for gen4+ chosen to match the 2748 * render engine max stride. 2749 * 2750 * The new CCS hash mode makes remapping impossible. 2751 */ 2752 if (!is_ccs_modifier(modifier)) { 2753 if (INTEL_GEN(dev_priv) >= 7) 2754 return 256*1024; 2755 else if (INTEL_GEN(dev_priv) >= 4) 2756 return 128*1024; 2757 } 2758 2759 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); 2760 } 2761 2762 static u32 2763 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) 2764 { 2765 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2766 u32 tile_width; 2767 2768 if (is_surface_linear(fb, color_plane)) { 2769 u32 max_stride = intel_plane_fb_max_stride(dev_priv, 2770 fb->format->format, 2771 fb->modifier); 2772 2773 /* 2774 * To make remapping with linear generally feasible 2775 * we need the stride to be page aligned. 2776 */ 2777 if (fb->pitches[color_plane] > max_stride && 2778 !is_ccs_modifier(fb->modifier)) 2779 return intel_tile_size(dev_priv); 2780 else 2781 return 64; 2782 } 2783 2784 tile_width = intel_tile_width_bytes(fb, color_plane); 2785 if (is_ccs_modifier(fb->modifier)) { 2786 /* 2787 * Display WA #0531: skl,bxt,kbl,glk 2788 * 2789 * Render decompression and plane width > 3840 2790 * combined with horizontal panning requires the 2791 * plane stride to be a multiple of 4. We'll just 2792 * require the entire fb to accommodate that to avoid 2793 * potential runtime errors at plane configuration time. 2794 */ 2795 if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840) 2796 tile_width *= 4; 2797 /* 2798 * The main surface pitch must be padded to a multiple of four 2799 * tile widths. 2800 */ 2801 else if (INTEL_GEN(dev_priv) >= 12) 2802 tile_width *= 4; 2803 } 2804 return tile_width; 2805 } 2806 2807 bool intel_plane_can_remap(const struct intel_plane_state *plane_state) 2808 { 2809 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 2810 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2811 const struct drm_framebuffer *fb = plane_state->hw.fb; 2812 int i; 2813 2814 /* We don't want to deal with remapping with cursors */ 2815 if (plane->id == PLANE_CURSOR) 2816 return false; 2817 2818 /* 2819 * The display engine limits already match/exceed the 2820 * render engine limits, so not much point in remapping. 2821 * Would also need to deal with the fence POT alignment 2822 * and gen2 2KiB GTT tile size. 2823 */ 2824 if (INTEL_GEN(dev_priv) < 4) 2825 return false; 2826 2827 /* 2828 * The new CCS hash mode isn't compatible with remapping as 2829 * the virtual address of the pages affects the compressed data.
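 * (Presumably: the hash folds address bits into the compressed layout, so
 * presenting the same pages at a different GTT address would make the CCS
 * data decode incorrectly.)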
2830 */ 2831 if (is_ccs_modifier(fb->modifier)) 2832 return false; 2833 2834 /* Linear needs a page aligned stride for remapping */ 2835 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) { 2836 unsigned int alignment = intel_tile_size(dev_priv) - 1; 2837 2838 for (i = 0; i < fb->format->num_planes; i++) { 2839 if (fb->pitches[i] & alignment) 2840 return false; 2841 } 2842 } 2843 2844 return true; 2845 } 2846 2847 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) 2848 { 2849 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 2850 const struct drm_framebuffer *fb = plane_state->hw.fb; 2851 unsigned int rotation = plane_state->hw.rotation; 2852 u32 stride, max_stride; 2853 2854 /* 2855 * No remapping for invisible planes since we don't have 2856 * an actual source viewport to remap. 2857 */ 2858 if (!plane_state->uapi.visible) 2859 return false; 2860 2861 if (!intel_plane_can_remap(plane_state)) 2862 return false; 2863 2864 /* 2865 * FIXME: aux plane limits on gen9+ are 2866 * unclear in Bspec, for now no checking. 2867 */ 2868 stride = intel_fb_pitch(fb, 0, rotation); 2869 max_stride = plane->max_stride(plane, fb->format->format, 2870 fb->modifier, rotation); 2871 2872 return stride > max_stride; 2873 } 2874 2875 static void 2876 intel_fb_plane_get_subsampling(int *hsub, int *vsub, 2877 const struct drm_framebuffer *fb, 2878 int color_plane) 2879 { 2880 int main_plane; 2881 2882 if (color_plane == 0) { 2883 *hsub = 1; 2884 *vsub = 1; 2885 2886 return; 2887 } 2888 2889 /* 2890 * TODO: Deduce the subsampling from the char block for all CCS 2891 * formats and planes. 2892 */ 2893 if (!is_gen12_ccs_plane(fb, color_plane)) { 2894 *hsub = fb->format->hsub; 2895 *vsub = fb->format->vsub; 2896 2897 return; 2898 } 2899 2900 main_plane = ccs_to_main_plane(fb, color_plane); 2901 *hsub = drm_format_info_block_width(fb->format, color_plane) / 2902 drm_format_info_block_width(fb->format, main_plane); 2903 2904 /* 2905 * The min stride check in the core framebuffer_check() function 2906 * assumes that format->hsub applies to every plane except for the 2907 * first plane. That's incorrect for the CCS AUX plane of the first 2908 * plane, but for the above check to pass we must define the block 2909 * width with that subsampling applied to it. Adjust the width here 2910 * accordingly, so we can calculate the actual subsampling factor. 2911 */ 2912 if (main_plane == 0) 2913 *hsub *= fb->format->hsub; 2914 2915 *vsub = 32; 2916 } 2917 static int 2918 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y) 2919 { 2920 struct drm_i915_private *i915 = to_i915(fb->dev); 2921 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2922 int main_plane; 2923 int hsub, vsub; 2924 int tile_width, tile_height; 2925 int ccs_x, ccs_y; 2926 int main_x, main_y; 2927 2928 if (!is_ccs_plane(fb, ccs_plane)) 2929 return 0; 2930 2931 intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height); 2932 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); 2933 2934 tile_width *= hsub; 2935 tile_height *= vsub; 2936 2937 ccs_x = (x * hsub) % tile_width; 2938 ccs_y = (y * vsub) % tile_height; 2939 2940 main_plane = ccs_to_main_plane(fb, ccs_plane); 2941 main_x = intel_fb->normal[main_plane].x % tile_width; 2942 main_y = intel_fb->normal[main_plane].y % tile_height; 2943 2944 /* 2945 * CCS doesn't have its own x/y offset register, so the intra CCS tile 2946 * x/y offsets must match between CCS and the main surface.
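 *
 * (Only the main surface's x/y offset is programmed into the hardware, so
 * the check below rejects any layout where the CCS data would start at a
 * different position within its tile than the main surface does.)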
2947 */ 2948 if (main_x != ccs_x || main_y != ccs_y) { 2949 drm_dbg_kms(&i915->drm, 2950 "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n", 2951 main_x, main_y, 2952 ccs_x, ccs_y, 2953 intel_fb->normal[main_plane].x, 2954 intel_fb->normal[main_plane].y, 2955 x, y); 2956 return -EINVAL; 2957 } 2958 2959 return 0; 2960 } 2961 2962 static void 2963 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane) 2964 { 2965 int main_plane = is_ccs_plane(fb, color_plane) ? 2966 ccs_to_main_plane(fb, color_plane) : 0; 2967 int main_hsub, main_vsub; 2968 int hsub, vsub; 2969 2970 intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane); 2971 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane); 2972 *w = fb->width / main_hsub / hsub; 2973 *h = fb->height / main_vsub / vsub; 2974 } 2975 2976 /* 2977 * Setup the rotated view for an FB plane and return the size the GTT mapping 2978 * requires for this view. 2979 */ 2980 static u32 2981 setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info, 2982 u32 gtt_offset_rotated, int x, int y, 2983 unsigned int width, unsigned int height, 2984 unsigned int tile_size, 2985 unsigned int tile_width, unsigned int tile_height, 2986 struct drm_framebuffer *fb) 2987 { 2988 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2989 struct intel_rotation_info *rot_info = &intel_fb->rot_info; 2990 unsigned int pitch_tiles; 2991 struct drm_rect r; 2992 2993 /* Y or Yf modifiers required for 90/270 rotation */ 2994 if (fb->modifier != I915_FORMAT_MOD_Y_TILED && 2995 fb->modifier != I915_FORMAT_MOD_Yf_TILED) 2996 return 0; 2997 2998 if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane))) 2999 return 0; 3000 3001 rot_info->plane[plane] = *plane_info; 3002 3003 intel_fb->rotated[plane].pitch = plane_info->height * tile_height; 3004 3005 /* rotate the x/y offsets to match the GTT view */ 3006 drm_rect_init(&r, x, y, width, height); 3007 drm_rect_rotate(&r, 3008 plane_info->width * tile_width, 3009 plane_info->height * tile_height, 3010 DRM_MODE_ROTATE_270); 3011 x = r.x1; 3012 y = r.y1; 3013 3014 /* rotate the tile dimensions to match the GTT view */ 3015 pitch_tiles = intel_fb->rotated[plane].pitch / tile_height; 3016 swap(tile_width, tile_height); 3017 3018 /* 3019 * We only keep the x/y offsets, so push all of the 3020 * gtt offset into the x/y offsets. 3021 */ 3022 intel_adjust_tile_offset(&x, &y, 3023 tile_width, tile_height, 3024 tile_size, pitch_tiles, 3025 gtt_offset_rotated * tile_size, 0); 3026 3027 /* 3028 * First pixel of the framebuffer from 3029 * the start of the rotated gtt mapping. 
3030 */ 3031 intel_fb->rotated[plane].x = x; 3032 intel_fb->rotated[plane].y = y; 3033 3034 return plane_info->width * plane_info->height; 3035 } 3036 3037 static int 3038 intel_fill_fb_info(struct drm_i915_private *dev_priv, 3039 struct drm_framebuffer *fb) 3040 { 3041 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 3042 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 3043 u32 gtt_offset_rotated = 0; 3044 unsigned int max_size = 0; 3045 int i, num_planes = fb->format->num_planes; 3046 unsigned int tile_size = intel_tile_size(dev_priv); 3047 3048 for (i = 0; i < num_planes; i++) { 3049 unsigned int width, height; 3050 unsigned int cpp, size; 3051 u32 offset; 3052 int x, y; 3053 int ret; 3054 3055 cpp = fb->format->cpp[i]; 3056 intel_fb_plane_dims(&width, &height, fb, i); 3057 3058 ret = intel_fb_offset_to_xy(&x, &y, fb, i); 3059 if (ret) { 3060 drm_dbg_kms(&dev_priv->drm, 3061 "bad fb plane %d offset: 0x%x\n", 3062 i, fb->offsets[i]); 3063 return ret; 3064 } 3065 3066 ret = intel_fb_check_ccs_xy(fb, i, x, y); 3067 if (ret) 3068 return ret; 3069 3070 /* 3071 * The fence (if used) is aligned to the start of the object 3072 * so having the framebuffer wrap around across the edge of the 3073 * fenced region doesn't really work. We have no API to configure 3074 * the fence start offset within the object (nor could we probably 3075 * on gen2/3). So it's just easier if we just require that the 3076 * fb layout agrees with the fence layout. We already check that the 3077 * fb stride matches the fence stride elsewhere. 3078 */ 3079 if (i == 0 && i915_gem_object_is_tiled(obj) && 3080 (x + width) * cpp > fb->pitches[i]) { 3081 drm_dbg_kms(&dev_priv->drm, 3082 "bad fb plane %d offset: 0x%x\n", 3083 i, fb->offsets[i]); 3084 return -EINVAL; 3085 } 3086 3087 /* 3088 * First pixel of the framebuffer from 3089 * the start of the normal gtt mapping. 3090 */ 3091 intel_fb->normal[i].x = x; 3092 intel_fb->normal[i].y = y; 3093 3094 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i, 3095 fb->pitches[i], 3096 DRM_MODE_ROTATE_0, 3097 tile_size); 3098 offset /= tile_size; 3099 3100 if (!is_surface_linear(fb, i)) { 3101 struct intel_remapped_plane_info plane_info; 3102 unsigned int tile_width, tile_height; 3103 3104 intel_tile_dims(fb, i, &tile_width, &tile_height); 3105 3106 plane_info.offset = offset; 3107 plane_info.stride = DIV_ROUND_UP(fb->pitches[i], 3108 tile_width * cpp); 3109 plane_info.width = DIV_ROUND_UP(x + width, tile_width); 3110 plane_info.height = DIV_ROUND_UP(y + height, 3111 tile_height); 3112 3113 /* how many tiles does this plane need */ 3114 size = plane_info.stride * plane_info.height; 3115 /* 3116 * If the plane isn't horizontally tile aligned, 3117 * we need one more tile. 
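 * (E.g. with hypothetical numbers: 128 px wide tiles, a 1024 byte pitch
 * at cpp = 4 giving stride = 2 tiles, and a 256 px wide plane starting at
 * x = 72: each row touches three tile columns, one more than the stride
 * alone would account for.)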
3118 */ 3119 if (x != 0) 3120 size++; 3121 3122 gtt_offset_rotated += 3123 setup_fb_rotation(i, &plane_info, 3124 gtt_offset_rotated, 3125 x, y, width, height, 3126 tile_size, 3127 tile_width, tile_height, 3128 fb); 3129 } else { 3130 size = DIV_ROUND_UP((y + height) * fb->pitches[i] + 3131 x * cpp, tile_size); 3132 } 3133 3134 /* how many tiles in total needed in the bo */ 3135 max_size = max(max_size, offset + size); 3136 } 3137 3138 if (mul_u32_u32(max_size, tile_size) > obj->base.size) { 3139 drm_dbg_kms(&dev_priv->drm, 3140 "fb too big for bo (need %llu bytes, have %zu bytes)\n", 3141 mul_u32_u32(max_size, tile_size), obj->base.size); 3142 return -EINVAL; 3143 } 3144 3145 return 0; 3146 } 3147 3148 static void 3149 intel_plane_remap_gtt(struct intel_plane_state *plane_state) 3150 { 3151 struct drm_i915_private *dev_priv = 3152 to_i915(plane_state->uapi.plane->dev); 3153 struct drm_framebuffer *fb = plane_state->hw.fb; 3154 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 3155 struct intel_rotation_info *info = &plane_state->view.rotated; 3156 unsigned int rotation = plane_state->hw.rotation; 3157 int i, num_planes = fb->format->num_planes; 3158 unsigned int tile_size = intel_tile_size(dev_priv); 3159 unsigned int src_x, src_y; 3160 unsigned int src_w, src_h; 3161 u32 gtt_offset = 0; 3162 3163 memset(&plane_state->view, 0, sizeof(plane_state->view)); 3164 plane_state->view.type = drm_rotation_90_or_270(rotation) ? 3165 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED; 3166 3167 src_x = plane_state->uapi.src.x1 >> 16; 3168 src_y = plane_state->uapi.src.y1 >> 16; 3169 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 3170 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 3171 3172 drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier)); 3173 3174 /* Make src coordinates relative to the viewport */ 3175 drm_rect_translate(&plane_state->uapi.src, 3176 -(src_x << 16), -(src_y << 16)); 3177 3178 /* Rotate src coordinates to match rotated GTT view */ 3179 if (drm_rotation_90_or_270(rotation)) 3180 drm_rect_rotate(&plane_state->uapi.src, 3181 src_w << 16, src_h << 16, 3182 DRM_MODE_ROTATE_270); 3183 3184 for (i = 0; i < num_planes; i++) { 3185 unsigned int hsub = i ? fb->format->hsub : 1; 3186 unsigned int vsub = i ? fb->format->vsub : 1; 3187 unsigned int cpp = fb->format->cpp[i]; 3188 unsigned int tile_width, tile_height; 3189 unsigned int width, height; 3190 unsigned int pitch_tiles; 3191 unsigned int x, y; 3192 u32 offset; 3193 3194 intel_tile_dims(fb, i, &tile_width, &tile_height); 3195 3196 x = src_x / hsub; 3197 y = src_y / vsub; 3198 width = src_w / hsub; 3199 height = src_h / vsub; 3200 3201 /* 3202 * First pixel of the src viewport from the 3203 * start of the normal gtt mapping. 
3204 */ 3205 x += intel_fb->normal[i].x; 3206 y += intel_fb->normal[i].y; 3207 3208 offset = intel_compute_aligned_offset(dev_priv, &x, &y, 3209 fb, i, fb->pitches[i], 3210 DRM_MODE_ROTATE_0, tile_size); 3211 offset /= tile_size; 3212 3213 drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane)); 3214 info->plane[i].offset = offset; 3215 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], 3216 tile_width * cpp); 3217 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); 3218 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); 3219 3220 if (drm_rotation_90_or_270(rotation)) { 3221 struct drm_rect r; 3222 3223 /* rotate the x/y offsets to match the GTT view */ 3224 drm_rect_init(&r, x, y, width, height); 3225 drm_rect_rotate(&r, 3226 info->plane[i].width * tile_width, 3227 info->plane[i].height * tile_height, 3228 DRM_MODE_ROTATE_270); 3229 x = r.x1; 3230 y = r.y1; 3231 3232 pitch_tiles = info->plane[i].height; 3233 plane_state->color_plane[i].stride = pitch_tiles * tile_height; 3234 3235 /* rotate the tile dimensions to match the GTT view */ 3236 swap(tile_width, tile_height); 3237 } else { 3238 pitch_tiles = info->plane[i].width; 3239 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp; 3240 } 3241 3242 /* 3243 * We only keep the x/y offsets, so push all of the 3244 * gtt offset into the x/y offsets. 3245 */ 3246 intel_adjust_tile_offset(&x, &y, 3247 tile_width, tile_height, 3248 tile_size, pitch_tiles, 3249 gtt_offset * tile_size, 0); 3250 3251 gtt_offset += info->plane[i].width * info->plane[i].height; 3252 3253 plane_state->color_plane[i].offset = 0; 3254 plane_state->color_plane[i].x = x; 3255 plane_state->color_plane[i].y = y; 3256 } 3257 } 3258 3259 static int 3260 intel_plane_compute_gtt(struct intel_plane_state *plane_state) 3261 { 3262 const struct intel_framebuffer *fb = 3263 to_intel_framebuffer(plane_state->hw.fb); 3264 unsigned int rotation = plane_state->hw.rotation; 3265 int i, num_planes; 3266 3267 if (!fb) 3268 return 0; 3269 3270 num_planes = fb->base.format->num_planes; 3271 3272 if (intel_plane_needs_remap(plane_state)) { 3273 intel_plane_remap_gtt(plane_state); 3274 3275 /* 3276 * Sometimes even remapping can't overcome 3277 * the stride limitations :( Can happen with 3278 * big plane sizes and suitably misaligned 3279 * offsets. 
3280 */ 3281 return intel_plane_check_stride(plane_state); 3282 } 3283 3284 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation); 3285 3286 for (i = 0; i < num_planes; i++) { 3287 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation); 3288 plane_state->color_plane[i].offset = 0; 3289 3290 if (drm_rotation_90_or_270(rotation)) { 3291 plane_state->color_plane[i].x = fb->rotated[i].x; 3292 plane_state->color_plane[i].y = fb->rotated[i].y; 3293 } else { 3294 plane_state->color_plane[i].x = fb->normal[i].x; 3295 plane_state->color_plane[i].y = fb->normal[i].y; 3296 } 3297 } 3298 3299 /* Rotate src coordinates to match rotated GTT view */ 3300 if (drm_rotation_90_or_270(rotation)) 3301 drm_rect_rotate(&plane_state->uapi.src, 3302 fb->base.width << 16, fb->base.height << 16, 3303 DRM_MODE_ROTATE_270); 3304 3305 return intel_plane_check_stride(plane_state); 3306 } 3307 3308 static int i9xx_format_to_fourcc(int format) 3309 { 3310 switch (format) { 3311 case DISPPLANE_8BPP: 3312 return DRM_FORMAT_C8; 3313 case DISPPLANE_BGRA555: 3314 return DRM_FORMAT_ARGB1555; 3315 case DISPPLANE_BGRX555: 3316 return DRM_FORMAT_XRGB1555; 3317 case DISPPLANE_BGRX565: 3318 return DRM_FORMAT_RGB565; 3319 default: 3320 case DISPPLANE_BGRX888: 3321 return DRM_FORMAT_XRGB8888; 3322 case DISPPLANE_RGBX888: 3323 return DRM_FORMAT_XBGR8888; 3324 case DISPPLANE_BGRA888: 3325 return DRM_FORMAT_ARGB8888; 3326 case DISPPLANE_RGBA888: 3327 return DRM_FORMAT_ABGR8888; 3328 case DISPPLANE_BGRX101010: 3329 return DRM_FORMAT_XRGB2101010; 3330 case DISPPLANE_RGBX101010: 3331 return DRM_FORMAT_XBGR2101010; 3332 case DISPPLANE_BGRA101010: 3333 return DRM_FORMAT_ARGB2101010; 3334 case DISPPLANE_RGBA101010: 3335 return DRM_FORMAT_ABGR2101010; 3336 case DISPPLANE_RGBX161616: 3337 return DRM_FORMAT_XBGR16161616F; 3338 } 3339 } 3340 3341 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) 3342 { 3343 switch (format) { 3344 case PLANE_CTL_FORMAT_RGB_565: 3345 return DRM_FORMAT_RGB565; 3346 case PLANE_CTL_FORMAT_NV12: 3347 return DRM_FORMAT_NV12; 3348 case PLANE_CTL_FORMAT_XYUV: 3349 return DRM_FORMAT_XYUV8888; 3350 case PLANE_CTL_FORMAT_P010: 3351 return DRM_FORMAT_P010; 3352 case PLANE_CTL_FORMAT_P012: 3353 return DRM_FORMAT_P012; 3354 case PLANE_CTL_FORMAT_P016: 3355 return DRM_FORMAT_P016; 3356 case PLANE_CTL_FORMAT_Y210: 3357 return DRM_FORMAT_Y210; 3358 case PLANE_CTL_FORMAT_Y212: 3359 return DRM_FORMAT_Y212; 3360 case PLANE_CTL_FORMAT_Y216: 3361 return DRM_FORMAT_Y216; 3362 case PLANE_CTL_FORMAT_Y410: 3363 return DRM_FORMAT_XVYU2101010; 3364 case PLANE_CTL_FORMAT_Y412: 3365 return DRM_FORMAT_XVYU12_16161616; 3366 case PLANE_CTL_FORMAT_Y416: 3367 return DRM_FORMAT_XVYU16161616; 3368 default: 3369 case PLANE_CTL_FORMAT_XRGB_8888: 3370 if (rgb_order) { 3371 if (alpha) 3372 return DRM_FORMAT_ABGR8888; 3373 else 3374 return DRM_FORMAT_XBGR8888; 3375 } else { 3376 if (alpha) 3377 return DRM_FORMAT_ARGB8888; 3378 else 3379 return DRM_FORMAT_XRGB8888; 3380 } 3381 case PLANE_CTL_FORMAT_XRGB_2101010: 3382 if (rgb_order) { 3383 if (alpha) 3384 return DRM_FORMAT_ABGR2101010; 3385 else 3386 return DRM_FORMAT_XBGR2101010; 3387 } else { 3388 if (alpha) 3389 return DRM_FORMAT_ARGB2101010; 3390 else 3391 return DRM_FORMAT_XRGB2101010; 3392 } 3393 case PLANE_CTL_FORMAT_XRGB_16161616F: 3394 if (rgb_order) { 3395 if (alpha) 3396 return DRM_FORMAT_ABGR16161616F; 3397 else 3398 return DRM_FORMAT_XBGR16161616F; 3399 } else { 3400 if (alpha) 3401 return DRM_FORMAT_ARGB16161616F; 3402 else 3403 return 
DRM_FORMAT_XRGB16161616F; 3404 } 3405 } 3406 } 3407 3408 static struct i915_vma * 3409 initial_plane_vma(struct drm_i915_private *i915, 3410 struct intel_initial_plane_config *plane_config) 3411 { 3412 struct drm_i915_gem_object *obj; 3413 struct i915_vma *vma; 3414 u32 base, size; 3415 3416 if (plane_config->size == 0) 3417 return NULL; 3418 3419 base = round_down(plane_config->base, 3420 I915_GTT_MIN_ALIGNMENT); 3421 size = round_up(plane_config->base + plane_config->size, 3422 I915_GTT_MIN_ALIGNMENT); 3423 size -= base; 3424 3425 /* 3426 * If the FB is too big, just don't use it since fbdev is not very 3427 * important and we should probably use that space with FBC or other 3428 * features. 3429 */ 3430 if (size * 2 > i915->stolen_usable_size) 3431 return NULL; 3432 3433 obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size); 3434 if (IS_ERR(obj)) 3435 return NULL; 3436 3437 switch (plane_config->tiling) { 3438 case I915_TILING_NONE: 3439 break; 3440 case I915_TILING_X: 3441 case I915_TILING_Y: 3442 obj->tiling_and_stride = 3443 plane_config->fb->base.pitches[0] | 3444 plane_config->tiling; 3445 break; 3446 default: 3447 MISSING_CASE(plane_config->tiling); 3448 goto err_obj; 3449 } 3450 3451 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); 3452 if (IS_ERR(vma)) 3453 goto err_obj; 3454 3455 if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base)) 3456 goto err_obj; 3457 3458 if (i915_gem_object_is_tiled(obj) && 3459 !i915_vma_is_map_and_fenceable(vma)) 3460 goto err_obj; 3461 3462 return vma; 3463 3464 err_obj: 3465 i915_gem_object_put(obj); 3466 return NULL; 3467 } 3468 3469 static bool 3470 intel_alloc_initial_plane_obj(struct intel_crtc *crtc, 3471 struct intel_initial_plane_config *plane_config) 3472 { 3473 struct drm_device *dev = crtc->base.dev; 3474 struct drm_i915_private *dev_priv = to_i915(dev); 3475 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 3476 struct drm_framebuffer *fb = &plane_config->fb->base; 3477 struct i915_vma *vma; 3478 3479 switch (fb->modifier) { 3480 case DRM_FORMAT_MOD_LINEAR: 3481 case I915_FORMAT_MOD_X_TILED: 3482 case I915_FORMAT_MOD_Y_TILED: 3483 break; 3484 default: 3485 drm_dbg(&dev_priv->drm, 3486 "Unsupported modifier for initial FB: 0x%llx\n", 3487 fb->modifier); 3488 return false; 3489 } 3490 3491 vma = initial_plane_vma(dev_priv, plane_config); 3492 if (!vma) 3493 return false; 3494 3495 mode_cmd.pixel_format = fb->format->format; 3496 mode_cmd.width = fb->width; 3497 mode_cmd.height = fb->height; 3498 mode_cmd.pitches[0] = fb->pitches[0]; 3499 mode_cmd.modifier[0] = fb->modifier; 3500 mode_cmd.flags = DRM_MODE_FB_MODIFIERS; 3501 3502 if (intel_framebuffer_init(to_intel_framebuffer(fb), 3503 vma->obj, &mode_cmd)) { 3504 drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n"); 3505 goto err_vma; 3506 } 3507 3508 plane_config->vma = vma; 3509 return true; 3510 3511 err_vma: 3512 i915_vma_put(vma); 3513 return false; 3514 } 3515 3516 static void 3517 intel_set_plane_visible(struct intel_crtc_state *crtc_state, 3518 struct intel_plane_state *plane_state, 3519 bool visible) 3520 { 3521 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 3522 3523 plane_state->uapi.visible = visible; 3524 3525 if (visible) 3526 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base); 3527 else 3528 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base); 3529 } 3530 3531 static void fixup_active_planes(struct intel_crtc_state *crtc_state) 3532 { 3533 struct drm_i915_private *dev_priv = 
to_i915(crtc_state->uapi.crtc->dev); 3534 struct drm_plane *plane; 3535 3536 /* 3537 * Active_planes aliases if multiple "primary" or cursor planes 3538 * have been used on the same (or wrong) pipe. plane_mask uses 3539 * unique ids, hence we can use that to reconstruct active_planes. 3540 */ 3541 crtc_state->active_planes = 0; 3542 3543 drm_for_each_plane_mask(plane, &dev_priv->drm, 3544 crtc_state->uapi.plane_mask) 3545 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); 3546 } 3547 3548 static void intel_plane_disable_noatomic(struct intel_crtc *crtc, 3549 struct intel_plane *plane) 3550 { 3551 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3552 struct intel_crtc_state *crtc_state = 3553 to_intel_crtc_state(crtc->base.state); 3554 struct intel_plane_state *plane_state = 3555 to_intel_plane_state(plane->base.state); 3556 3557 drm_dbg_kms(&dev_priv->drm, 3558 "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n", 3559 plane->base.base.id, plane->base.name, 3560 crtc->base.base.id, crtc->base.name); 3561 3562 intel_set_plane_visible(crtc_state, plane_state, false); 3563 fixup_active_planes(crtc_state); 3564 crtc_state->data_rate[plane->id] = 0; 3565 crtc_state->min_cdclk[plane->id] = 0; 3566 3567 if (plane->id == PLANE_PRIMARY) 3568 hsw_disable_ips(crtc_state); 3569 3570 /* 3571 * Vblank time updates from the shadow to live plane control register 3572 * are blocked if the memory self-refresh mode is active at that 3573 * moment. So to make sure the plane gets truly disabled, first 3574 * disable the self-refresh mode. The self-refresh enable bit in turn 3575 * will be checked/applied by the HW only at the next frame start 3576 * event, which is after the vblank start event, so we need to have a 3577 * wait-for-vblank between disabling the plane and the pipe. 3578 */ 3579 if (HAS_GMCH(dev_priv) && 3580 intel_set_memory_cxsr(dev_priv, false)) 3581 intel_wait_for_vblank(dev_priv, crtc->pipe); 3582 3583 /* 3584 * Gen2 reports pipe underruns whenever all planes are disabled. 3585 * So disable underrun reporting before all the planes get disabled. 3586 */ 3587 if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes) 3588 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 3589 3590 intel_disable_plane(plane, crtc_state); 3591 } 3592 3593 static struct intel_frontbuffer * 3594 to_intel_frontbuffer(struct drm_framebuffer *fb) 3595 { 3596 return fb ?
	to_intel_framebuffer(fb)->frontbuffer : NULL;
}

static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			vma = state->vma;
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	__i915_vma_pin(vma);
	intel_state->vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
		if (vma->fence)
			intel_state->flags |= PLANE_HAS_FENCE;

	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	drm_framebuffer_get(fb);

	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}

static int skl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		/*
		 * Validated limit is 4k, but 5k should
		 * work apart from the following
features: 3711 * - Ytile (already limited to 4k) 3712 * - FP16 (already limited to 4k) 3713 * - render compression (already limited to 4k) 3714 * - KVMR sprite and cursor (don't care) 3715 * - horizontal panning (TODO verify this) 3716 * - pipe and plane scaling (TODO verify this) 3717 */ 3718 if (cpp == 8) 3719 return 4096; 3720 else 3721 return 5120; 3722 case I915_FORMAT_MOD_Y_TILED_CCS: 3723 case I915_FORMAT_MOD_Yf_TILED_CCS: 3724 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 3725 /* FIXME AUX plane? */ 3726 case I915_FORMAT_MOD_Y_TILED: 3727 case I915_FORMAT_MOD_Yf_TILED: 3728 if (cpp == 8) 3729 return 2048; 3730 else 3731 return 4096; 3732 default: 3733 MISSING_CASE(fb->modifier); 3734 return 2048; 3735 } 3736 } 3737 3738 static int glk_max_plane_width(const struct drm_framebuffer *fb, 3739 int color_plane, 3740 unsigned int rotation) 3741 { 3742 int cpp = fb->format->cpp[color_plane]; 3743 3744 switch (fb->modifier) { 3745 case DRM_FORMAT_MOD_LINEAR: 3746 case I915_FORMAT_MOD_X_TILED: 3747 if (cpp == 8) 3748 return 4096; 3749 else 3750 return 5120; 3751 case I915_FORMAT_MOD_Y_TILED_CCS: 3752 case I915_FORMAT_MOD_Yf_TILED_CCS: 3753 /* FIXME AUX plane? */ 3754 case I915_FORMAT_MOD_Y_TILED: 3755 case I915_FORMAT_MOD_Yf_TILED: 3756 if (cpp == 8) 3757 return 2048; 3758 else 3759 return 5120; 3760 default: 3761 MISSING_CASE(fb->modifier); 3762 return 2048; 3763 } 3764 } 3765 3766 static int icl_min_plane_width(const struct drm_framebuffer *fb) 3767 { 3768 /* Wa_14011264657, Wa_14011050563: gen11+ */ 3769 switch (fb->format->format) { 3770 case DRM_FORMAT_C8: 3771 return 18; 3772 case DRM_FORMAT_RGB565: 3773 return 10; 3774 case DRM_FORMAT_XRGB8888: 3775 case DRM_FORMAT_XBGR8888: 3776 case DRM_FORMAT_ARGB8888: 3777 case DRM_FORMAT_ABGR8888: 3778 case DRM_FORMAT_XRGB2101010: 3779 case DRM_FORMAT_XBGR2101010: 3780 case DRM_FORMAT_ARGB2101010: 3781 case DRM_FORMAT_ABGR2101010: 3782 case DRM_FORMAT_XVYU2101010: 3783 case DRM_FORMAT_Y212: 3784 case DRM_FORMAT_Y216: 3785 return 6; 3786 case DRM_FORMAT_NV12: 3787 return 20; 3788 case DRM_FORMAT_P010: 3789 case DRM_FORMAT_P012: 3790 case DRM_FORMAT_P016: 3791 return 12; 3792 case DRM_FORMAT_XRGB16161616F: 3793 case DRM_FORMAT_XBGR16161616F: 3794 case DRM_FORMAT_ARGB16161616F: 3795 case DRM_FORMAT_ABGR16161616F: 3796 case DRM_FORMAT_XVYU12_16161616: 3797 case DRM_FORMAT_XVYU16161616: 3798 return 4; 3799 default: 3800 return 1; 3801 } 3802 } 3803 3804 static int icl_max_plane_width(const struct drm_framebuffer *fb, 3805 int color_plane, 3806 unsigned int rotation) 3807 { 3808 return 5120; 3809 } 3810 3811 static int skl_max_plane_height(void) 3812 { 3813 return 4096; 3814 } 3815 3816 static int icl_max_plane_height(void) 3817 { 3818 return 4320; 3819 } 3820 3821 static bool 3822 skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, 3823 int main_x, int main_y, u32 main_offset, 3824 int ccs_plane) 3825 { 3826 const struct drm_framebuffer *fb = plane_state->hw.fb; 3827 int aux_x = plane_state->color_plane[ccs_plane].x; 3828 int aux_y = plane_state->color_plane[ccs_plane].y; 3829 u32 aux_offset = plane_state->color_plane[ccs_plane].offset; 3830 u32 alignment = intel_surf_alignment(fb, ccs_plane); 3831 int hsub; 3832 int vsub; 3833 3834 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); 3835 while (aux_offset >= main_offset && aux_y <= main_y) { 3836 int x, y; 3837 3838 if (aux_x == main_x && aux_y == main_y) 3839 break; 3840 3841 if (aux_offset == 0) 3842 break; 3843 3844 x = aux_x / hsub; 3845 y = aux_y / vsub; 3846 aux_offset = 
intel_plane_adjust_aligned_offset(&x, &y, 3847 plane_state, 3848 ccs_plane, 3849 aux_offset, 3850 aux_offset - 3851 alignment); 3852 aux_x = x * hsub + aux_x % hsub; 3853 aux_y = y * vsub + aux_y % vsub; 3854 } 3855 3856 if (aux_x != main_x || aux_y != main_y) 3857 return false; 3858 3859 plane_state->color_plane[ccs_plane].offset = aux_offset; 3860 plane_state->color_plane[ccs_plane].x = aux_x; 3861 plane_state->color_plane[ccs_plane].y = aux_y; 3862 3863 return true; 3864 } 3865 3866 unsigned int 3867 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state) 3868 { 3869 int x = 0, y = 0; 3870 3871 intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3872 plane_state->color_plane[0].offset, 0); 3873 3874 return y; 3875 } 3876 3877 static int skl_check_main_surface(struct intel_plane_state *plane_state) 3878 { 3879 struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); 3880 const struct drm_framebuffer *fb = plane_state->hw.fb; 3881 unsigned int rotation = plane_state->hw.rotation; 3882 int x = plane_state->uapi.src.x1 >> 16; 3883 int y = plane_state->uapi.src.y1 >> 16; 3884 int w = drm_rect_width(&plane_state->uapi.src) >> 16; 3885 int h = drm_rect_height(&plane_state->uapi.src) >> 16; 3886 int max_width, min_width, max_height; 3887 u32 alignment, offset; 3888 int aux_plane = intel_main_to_aux_plane(fb, 0); 3889 u32 aux_offset = plane_state->color_plane[aux_plane].offset; 3890 3891 if (INTEL_GEN(dev_priv) >= 11) { 3892 max_width = icl_max_plane_width(fb, 0, rotation); 3893 min_width = icl_min_plane_width(fb); 3894 } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { 3895 max_width = glk_max_plane_width(fb, 0, rotation); 3896 min_width = 1; 3897 } else { 3898 max_width = skl_max_plane_width(fb, 0, rotation); 3899 min_width = 1; 3900 } 3901 3902 if (INTEL_GEN(dev_priv) >= 11) 3903 max_height = icl_max_plane_height(); 3904 else 3905 max_height = skl_max_plane_height(); 3906 3907 if (w > max_width || w < min_width || h > max_height) { 3908 drm_dbg_kms(&dev_priv->drm, 3909 "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n", 3910 w, h, min_width, max_width, max_height); 3911 return -EINVAL; 3912 } 3913 3914 intel_add_fb_offsets(&x, &y, plane_state, 0); 3915 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0); 3916 alignment = intel_surf_alignment(fb, 0); 3917 if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment))) 3918 return -EINVAL; 3919 3920 /* 3921 * AUX surface offset is specified as the distance from the 3922 * main surface offset, and it must be non-negative. Make 3923 * sure that is what we will get. 3924 */ 3925 if (offset > aux_offset) 3926 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3927 offset, aux_offset & ~(alignment - 1)); 3928 3929 /* 3930 * When using an X-tiled surface, the plane blows up 3931 * if the x offset + width exceed the stride. 
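	 * e.g. with 4 bytes per pixel and a 4096 byte stride,
	 * x + w must stay at or below 4096 / 4 = 1024 pixels.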
3932 * 3933 * TODO: linear and Y-tiled seem fine, Yf untested, 3934 */ 3935 if (fb->modifier == I915_FORMAT_MOD_X_TILED) { 3936 int cpp = fb->format->cpp[0]; 3937 3938 while ((x + w) * cpp > plane_state->color_plane[0].stride) { 3939 if (offset == 0) { 3940 drm_dbg_kms(&dev_priv->drm, 3941 "Unable to find suitable display surface offset due to X-tiling\n"); 3942 return -EINVAL; 3943 } 3944 3945 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3946 offset, offset - alignment); 3947 } 3948 } 3949 3950 /* 3951 * CCS AUX surface doesn't have its own x/y offsets, we must make sure 3952 * they match with the main surface x/y offsets. 3953 */ 3954 if (is_ccs_modifier(fb->modifier)) { 3955 while (!skl_check_main_ccs_coordinates(plane_state, x, y, 3956 offset, aux_plane)) { 3957 if (offset == 0) 3958 break; 3959 3960 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3961 offset, offset - alignment); 3962 } 3963 3964 if (x != plane_state->color_plane[aux_plane].x || 3965 y != plane_state->color_plane[aux_plane].y) { 3966 drm_dbg_kms(&dev_priv->drm, 3967 "Unable to find suitable display surface offset due to CCS\n"); 3968 return -EINVAL; 3969 } 3970 } 3971 3972 plane_state->color_plane[0].offset = offset; 3973 plane_state->color_plane[0].x = x; 3974 plane_state->color_plane[0].y = y; 3975 3976 /* 3977 * Put the final coordinates back so that the src 3978 * coordinate checks will see the right values. 3979 */ 3980 drm_rect_translate_to(&plane_state->uapi.src, 3981 x << 16, y << 16); 3982 3983 return 0; 3984 } 3985 3986 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) 3987 { 3988 struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 3989 const struct drm_framebuffer *fb = plane_state->hw.fb; 3990 unsigned int rotation = plane_state->hw.rotation; 3991 int uv_plane = 1; 3992 int max_width = skl_max_plane_width(fb, uv_plane, rotation); 3993 int max_height = 4096; 3994 int x = plane_state->uapi.src.x1 >> 17; 3995 int y = plane_state->uapi.src.y1 >> 17; 3996 int w = drm_rect_width(&plane_state->uapi.src) >> 17; 3997 int h = drm_rect_height(&plane_state->uapi.src) >> 17; 3998 u32 offset; 3999 4000 intel_add_fb_offsets(&x, &y, plane_state, uv_plane); 4001 offset = intel_plane_compute_aligned_offset(&x, &y, 4002 plane_state, uv_plane); 4003 4004 /* FIXME not quite sure how/if these apply to the chroma plane */ 4005 if (w > max_width || h > max_height) { 4006 drm_dbg_kms(&i915->drm, 4007 "CbCr source size %dx%d too big (limit %dx%d)\n", 4008 w, h, max_width, max_height); 4009 return -EINVAL; 4010 } 4011 4012 if (is_ccs_modifier(fb->modifier)) { 4013 int ccs_plane = main_to_ccs_plane(fb, uv_plane); 4014 int aux_offset = plane_state->color_plane[ccs_plane].offset; 4015 int alignment = intel_surf_alignment(fb, uv_plane); 4016 4017 if (offset > aux_offset) 4018 offset = intel_plane_adjust_aligned_offset(&x, &y, 4019 plane_state, 4020 uv_plane, 4021 offset, 4022 aux_offset & ~(alignment - 1)); 4023 4024 while (!skl_check_main_ccs_coordinates(plane_state, x, y, 4025 offset, ccs_plane)) { 4026 if (offset == 0) 4027 break; 4028 4029 offset = intel_plane_adjust_aligned_offset(&x, &y, 4030 plane_state, 4031 uv_plane, 4032 offset, offset - alignment); 4033 } 4034 4035 if (x != plane_state->color_plane[ccs_plane].x || 4036 y != plane_state->color_plane[ccs_plane].y) { 4037 drm_dbg_kms(&i915->drm, 4038 "Unable to find suitable display surface offset due to CCS\n"); 4039 return -EINVAL; 4040 } 4041 } 4042 4043 
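	/* Store the resolved UV plane offset and coordinates for programming. */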
plane_state->color_plane[uv_plane].offset = offset; 4044 plane_state->color_plane[uv_plane].x = x; 4045 plane_state->color_plane[uv_plane].y = y; 4046 4047 return 0; 4048 } 4049 4050 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) 4051 { 4052 const struct drm_framebuffer *fb = plane_state->hw.fb; 4053 int src_x = plane_state->uapi.src.x1 >> 16; 4054 int src_y = plane_state->uapi.src.y1 >> 16; 4055 u32 offset; 4056 int ccs_plane; 4057 4058 for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) { 4059 int main_hsub, main_vsub; 4060 int hsub, vsub; 4061 int x, y; 4062 4063 if (!is_ccs_plane(fb, ccs_plane)) 4064 continue; 4065 4066 intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, 4067 ccs_to_main_plane(fb, ccs_plane)); 4068 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); 4069 4070 hsub *= main_hsub; 4071 vsub *= main_vsub; 4072 x = src_x / hsub; 4073 y = src_y / vsub; 4074 4075 intel_add_fb_offsets(&x, &y, plane_state, ccs_plane); 4076 4077 offset = intel_plane_compute_aligned_offset(&x, &y, 4078 plane_state, 4079 ccs_plane); 4080 4081 plane_state->color_plane[ccs_plane].offset = offset; 4082 plane_state->color_plane[ccs_plane].x = (x * hsub + 4083 src_x % hsub) / 4084 main_hsub; 4085 plane_state->color_plane[ccs_plane].y = (y * vsub + 4086 src_y % vsub) / 4087 main_vsub; 4088 } 4089 4090 return 0; 4091 } 4092 4093 int skl_check_plane_surface(struct intel_plane_state *plane_state) 4094 { 4095 const struct drm_framebuffer *fb = plane_state->hw.fb; 4096 int ret, i; 4097 4098 ret = intel_plane_compute_gtt(plane_state); 4099 if (ret) 4100 return ret; 4101 4102 if (!plane_state->uapi.visible) 4103 return 0; 4104 4105 /* 4106 * Handle the AUX surface first since the main surface setup depends on 4107 * it. 4108 */ 4109 if (is_ccs_modifier(fb->modifier)) { 4110 ret = skl_check_ccs_aux_surface(plane_state); 4111 if (ret) 4112 return ret; 4113 } 4114 4115 if (intel_format_info_is_yuv_semiplanar(fb->format, 4116 fb->modifier)) { 4117 ret = skl_check_nv12_aux_surface(plane_state); 4118 if (ret) 4119 return ret; 4120 } 4121 4122 for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) { 4123 plane_state->color_plane[i].offset = ~0xfff; 4124 plane_state->color_plane[i].x = 0; 4125 plane_state->color_plane[i].y = 0; 4126 } 4127 4128 ret = skl_check_main_surface(plane_state); 4129 if (ret) 4130 return ret; 4131 4132 return 0; 4133 } 4134 4135 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state, 4136 const struct intel_plane_state *plane_state, 4137 unsigned int *num, unsigned int *den) 4138 { 4139 const struct drm_framebuffer *fb = plane_state->hw.fb; 4140 unsigned int cpp = fb->format->cpp[0]; 4141 4142 /* 4143 * g4x bspec says 64bpp pixel rate can't exceed 80% 4144 * of cdclk when the sprite plane is enabled on the 4145 * same pipe. ilk/snb bspec says 64bpp pixel rate is 4146 * never allowed to exceed 80% of cdclk. Let's just go 4147 * with the ilk/snb limit always. 4148 */ 4149 if (cpp == 8) { 4150 *num = 10; 4151 *den = 8; 4152 } else { 4153 *num = 1; 4154 *den = 1; 4155 } 4156 } 4157 4158 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, 4159 const struct intel_plane_state *plane_state) 4160 { 4161 unsigned int pixel_rate; 4162 unsigned int num, den; 4163 4164 /* 4165 * Note that crtc_state->pixel_rate accounts for both 4166 * horizontal and vertical panel fitter downscaling factors. 
4167 * Pre-HSW bspec tells us to only consider the horizontal 4168 * downscaling factor here. We ignore that and just consider 4169 * both for simplicity. 4170 */ 4171 pixel_rate = crtc_state->pixel_rate; 4172 4173 i9xx_plane_ratio(crtc_state, plane_state, &num, &den); 4174 4175 /* two pixels per clock with double wide pipe */ 4176 if (crtc_state->double_wide) 4177 den *= 2; 4178 4179 return DIV_ROUND_UP(pixel_rate * num, den); 4180 } 4181 4182 unsigned int 4183 i9xx_plane_max_stride(struct intel_plane *plane, 4184 u32 pixel_format, u64 modifier, 4185 unsigned int rotation) 4186 { 4187 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4188 4189 if (!HAS_GMCH(dev_priv)) { 4190 return 32*1024; 4191 } else if (INTEL_GEN(dev_priv) >= 4) { 4192 if (modifier == I915_FORMAT_MOD_X_TILED) 4193 return 16*1024; 4194 else 4195 return 32*1024; 4196 } else if (INTEL_GEN(dev_priv) >= 3) { 4197 if (modifier == I915_FORMAT_MOD_X_TILED) 4198 return 8*1024; 4199 else 4200 return 16*1024; 4201 } else { 4202 if (plane->i9xx_plane == PLANE_C) 4203 return 4*1024; 4204 else 4205 return 8*1024; 4206 } 4207 } 4208 4209 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 4210 { 4211 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4212 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4213 u32 dspcntr = 0; 4214 4215 if (crtc_state->gamma_enable) 4216 dspcntr |= DISPPLANE_GAMMA_ENABLE; 4217 4218 if (crtc_state->csc_enable) 4219 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 4220 4221 if (INTEL_GEN(dev_priv) < 5) 4222 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); 4223 4224 return dspcntr; 4225 } 4226 4227 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, 4228 const struct intel_plane_state *plane_state) 4229 { 4230 struct drm_i915_private *dev_priv = 4231 to_i915(plane_state->uapi.plane->dev); 4232 const struct drm_framebuffer *fb = plane_state->hw.fb; 4233 unsigned int rotation = plane_state->hw.rotation; 4234 u32 dspcntr; 4235 4236 dspcntr = DISPLAY_PLANE_ENABLE; 4237 4238 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) || 4239 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 4240 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 4241 4242 switch (fb->format->format) { 4243 case DRM_FORMAT_C8: 4244 dspcntr |= DISPPLANE_8BPP; 4245 break; 4246 case DRM_FORMAT_XRGB1555: 4247 dspcntr |= DISPPLANE_BGRX555; 4248 break; 4249 case DRM_FORMAT_ARGB1555: 4250 dspcntr |= DISPPLANE_BGRA555; 4251 break; 4252 case DRM_FORMAT_RGB565: 4253 dspcntr |= DISPPLANE_BGRX565; 4254 break; 4255 case DRM_FORMAT_XRGB8888: 4256 dspcntr |= DISPPLANE_BGRX888; 4257 break; 4258 case DRM_FORMAT_XBGR8888: 4259 dspcntr |= DISPPLANE_RGBX888; 4260 break; 4261 case DRM_FORMAT_ARGB8888: 4262 dspcntr |= DISPPLANE_BGRA888; 4263 break; 4264 case DRM_FORMAT_ABGR8888: 4265 dspcntr |= DISPPLANE_RGBA888; 4266 break; 4267 case DRM_FORMAT_XRGB2101010: 4268 dspcntr |= DISPPLANE_BGRX101010; 4269 break; 4270 case DRM_FORMAT_XBGR2101010: 4271 dspcntr |= DISPPLANE_RGBX101010; 4272 break; 4273 case DRM_FORMAT_ARGB2101010: 4274 dspcntr |= DISPPLANE_BGRA101010; 4275 break; 4276 case DRM_FORMAT_ABGR2101010: 4277 dspcntr |= DISPPLANE_RGBA101010; 4278 break; 4279 case DRM_FORMAT_XBGR16161616F: 4280 dspcntr |= DISPPLANE_RGBX161616; 4281 break; 4282 default: 4283 MISSING_CASE(fb->format->format); 4284 return 0; 4285 } 4286 4287 if (INTEL_GEN(dev_priv) >= 4 && 4288 fb->modifier == I915_FORMAT_MOD_X_TILED) 4289 dspcntr |= DISPPLANE_TILED; 4290 4291 if (rotation & DRM_MODE_ROTATE_180) 4292 dspcntr |= DISPPLANE_ROTATE_180; 4293 
4294 if (rotation & DRM_MODE_REFLECT_X) 4295 dspcntr |= DISPPLANE_MIRROR; 4296 4297 return dspcntr; 4298 } 4299 4300 int i9xx_check_plane_surface(struct intel_plane_state *plane_state) 4301 { 4302 struct drm_i915_private *dev_priv = 4303 to_i915(plane_state->uapi.plane->dev); 4304 const struct drm_framebuffer *fb = plane_state->hw.fb; 4305 int src_x, src_y, src_w; 4306 u32 offset; 4307 int ret; 4308 4309 ret = intel_plane_compute_gtt(plane_state); 4310 if (ret) 4311 return ret; 4312 4313 if (!plane_state->uapi.visible) 4314 return 0; 4315 4316 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 4317 src_x = plane_state->uapi.src.x1 >> 16; 4318 src_y = plane_state->uapi.src.y1 >> 16; 4319 4320 /* Undocumented hardware limit on i965/g4x/vlv/chv */ 4321 if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048) 4322 return -EINVAL; 4323 4324 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 4325 4326 if (INTEL_GEN(dev_priv) >= 4) 4327 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 4328 plane_state, 0); 4329 else 4330 offset = 0; 4331 4332 /* 4333 * Put the final coordinates back so that the src 4334 * coordinate checks will see the right values. 4335 */ 4336 drm_rect_translate_to(&plane_state->uapi.src, 4337 src_x << 16, src_y << 16); 4338 4339 /* HSW/BDW do this automagically in hardware */ 4340 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 4341 unsigned int rotation = plane_state->hw.rotation; 4342 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 4343 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 4344 4345 if (rotation & DRM_MODE_ROTATE_180) { 4346 src_x += src_w - 1; 4347 src_y += src_h - 1; 4348 } else if (rotation & DRM_MODE_REFLECT_X) { 4349 src_x += src_w - 1; 4350 } 4351 } 4352 4353 plane_state->color_plane[0].offset = offset; 4354 plane_state->color_plane[0].x = src_x; 4355 plane_state->color_plane[0].y = src_y; 4356 4357 return 0; 4358 } 4359 4360 static bool i9xx_plane_has_windowing(struct intel_plane *plane) 4361 { 4362 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4363 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4364 4365 if (IS_CHERRYVIEW(dev_priv)) 4366 return i9xx_plane == PLANE_B; 4367 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 4368 return false; 4369 else if (IS_GEN(dev_priv, 4)) 4370 return i9xx_plane == PLANE_C; 4371 else 4372 return i9xx_plane == PLANE_B || 4373 i9xx_plane == PLANE_C; 4374 } 4375 4376 static int 4377 i9xx_plane_check(struct intel_crtc_state *crtc_state, 4378 struct intel_plane_state *plane_state) 4379 { 4380 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 4381 int ret; 4382 4383 ret = chv_plane_check_rotation(plane_state); 4384 if (ret) 4385 return ret; 4386 4387 ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, 4388 &crtc_state->uapi, 4389 DRM_PLANE_HELPER_NO_SCALING, 4390 DRM_PLANE_HELPER_NO_SCALING, 4391 i9xx_plane_has_windowing(plane), 4392 true); 4393 if (ret) 4394 return ret; 4395 4396 ret = i9xx_check_plane_surface(plane_state); 4397 if (ret) 4398 return ret; 4399 4400 if (!plane_state->uapi.visible) 4401 return 0; 4402 4403 ret = intel_plane_check_src_coordinates(plane_state); 4404 if (ret) 4405 return ret; 4406 4407 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state); 4408 4409 return 0; 4410 } 4411 4412 static void i9xx_update_plane(struct intel_plane *plane, 4413 const struct intel_crtc_state *crtc_state, 4414 const struct intel_plane_state *plane_state) 4415 { 4416 struct drm_i915_private *dev_priv = 
to_i915(plane->base.dev); 4417 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4418 u32 linear_offset; 4419 int x = plane_state->color_plane[0].x; 4420 int y = plane_state->color_plane[0].y; 4421 int crtc_x = plane_state->uapi.dst.x1; 4422 int crtc_y = plane_state->uapi.dst.y1; 4423 int crtc_w = drm_rect_width(&plane_state->uapi.dst); 4424 int crtc_h = drm_rect_height(&plane_state->uapi.dst); 4425 unsigned long irqflags; 4426 u32 dspaddr_offset; 4427 u32 dspcntr; 4428 4429 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); 4430 4431 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 4432 4433 if (INTEL_GEN(dev_priv) >= 4) 4434 dspaddr_offset = plane_state->color_plane[0].offset; 4435 else 4436 dspaddr_offset = linear_offset; 4437 4438 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 4439 4440 intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane), 4441 plane_state->color_plane[0].stride); 4442 4443 if (INTEL_GEN(dev_priv) < 4) { 4444 /* 4445 * PLANE_A doesn't actually have a full window 4446 * generator but let's assume we still need to 4447 * program whatever is there. 4448 */ 4449 intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane), 4450 (crtc_y << 16) | crtc_x); 4451 intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), 4452 ((crtc_h - 1) << 16) | (crtc_w - 1)); 4453 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { 4454 intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane), 4455 (crtc_y << 16) | crtc_x); 4456 intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane), 4457 ((crtc_h - 1) << 16) | (crtc_w - 1)); 4458 intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0); 4459 } 4460 4461 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 4462 intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane), 4463 (y << 16) | x); 4464 } else if (INTEL_GEN(dev_priv) >= 4) { 4465 intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane), 4466 linear_offset); 4467 intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane), 4468 (y << 16) | x); 4469 } 4470 4471 /* 4472 * The control register self-arms if the plane was previously 4473 * disabled. Try to make the plane enable atomic by writing 4474 * the control register just before the surface register. 4475 */ 4476 intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); 4477 if (INTEL_GEN(dev_priv) >= 4) 4478 intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 4479 intel_plane_ggtt_offset(plane_state) + dspaddr_offset); 4480 else 4481 intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 4482 intel_plane_ggtt_offset(plane_state) + dspaddr_offset); 4483 4484 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 4485 } 4486 4487 static void i9xx_disable_plane(struct intel_plane *plane, 4488 const struct intel_crtc_state *crtc_state) 4489 { 4490 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4491 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4492 unsigned long irqflags; 4493 u32 dspcntr; 4494 4495 /* 4496 * DSPCNTR pipe gamma enable on g4x+ and pipe csc 4497 * enable on ilk+ affect the pipe bottom color as 4498 * well, so we must configure them even if the plane 4499 * is disabled. 4500 * 4501 * On pre-g4x there is no way to gamma correct the 4502 * pipe bottom color but we'll keep on doing this 4503 * anyway so that the crtc state readout works correctly. 
4504 */ 4505 dspcntr = i9xx_plane_ctl_crtc(crtc_state); 4506 4507 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 4508 4509 intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); 4510 if (INTEL_GEN(dev_priv) >= 4) 4511 intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0); 4512 else 4513 intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0); 4514 4515 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 4516 } 4517 4518 static bool i9xx_plane_get_hw_state(struct intel_plane *plane, 4519 enum pipe *pipe) 4520 { 4521 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4522 enum intel_display_power_domain power_domain; 4523 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4524 intel_wakeref_t wakeref; 4525 bool ret; 4526 u32 val; 4527 4528 /* 4529 * Not 100% correct for planes that can move between pipes, 4530 * but that's only the case for gen2-4 which don't have any 4531 * display power wells. 4532 */ 4533 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 4534 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 4535 if (!wakeref) 4536 return false; 4537 4538 val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); 4539 4540 ret = val & DISPLAY_PLANE_ENABLE; 4541 4542 if (INTEL_GEN(dev_priv) >= 5) 4543 *pipe = plane->pipe; 4544 else 4545 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 4546 DISPPLANE_SEL_PIPE_SHIFT; 4547 4548 intel_display_power_put(dev_priv, power_domain, wakeref); 4549 4550 return ret; 4551 } 4552 4553 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 4554 { 4555 struct drm_device *dev = intel_crtc->base.dev; 4556 struct drm_i915_private *dev_priv = to_i915(dev); 4557 unsigned long irqflags; 4558 4559 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 4560 4561 intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0); 4562 intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 4563 intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); 4564 4565 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 4566 } 4567 4568 /* 4569 * This function detaches (aka. unbinds) unused scalers in hardware 4570 */ 4571 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state) 4572 { 4573 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 4574 const struct intel_crtc_scaler_state *scaler_state = 4575 &crtc_state->scaler_state; 4576 int i; 4577 4578 /* loop through and disable scalers that aren't in use */ 4579 for (i = 0; i < intel_crtc->num_scalers; i++) { 4580 if (!scaler_state->scalers[i].in_use) 4581 skl_detach_scaler(intel_crtc, i); 4582 } 4583 } 4584 4585 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, 4586 int color_plane, unsigned int rotation) 4587 { 4588 /* 4589 * The stride is either expressed as a multiple of 64 bytes chunks for 4590 * linear buffers or in number of tiles for tiled buffers. 
4591 */ 4592 if (is_surface_linear(fb, color_plane)) 4593 return 64; 4594 else if (drm_rotation_90_or_270(rotation)) 4595 return intel_tile_height(fb, color_plane); 4596 else 4597 return intel_tile_width_bytes(fb, color_plane); 4598 } 4599 4600 u32 skl_plane_stride(const struct intel_plane_state *plane_state, 4601 int color_plane) 4602 { 4603 const struct drm_framebuffer *fb = plane_state->hw.fb; 4604 unsigned int rotation = plane_state->hw.rotation; 4605 u32 stride = plane_state->color_plane[color_plane].stride; 4606 4607 if (color_plane >= fb->format->num_planes) 4608 return 0; 4609 4610 return stride / skl_plane_stride_mult(fb, color_plane, rotation); 4611 } 4612 4613 static u32 skl_plane_ctl_format(u32 pixel_format) 4614 { 4615 switch (pixel_format) { 4616 case DRM_FORMAT_C8: 4617 return PLANE_CTL_FORMAT_INDEXED; 4618 case DRM_FORMAT_RGB565: 4619 return PLANE_CTL_FORMAT_RGB_565; 4620 case DRM_FORMAT_XBGR8888: 4621 case DRM_FORMAT_ABGR8888: 4622 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; 4623 case DRM_FORMAT_XRGB8888: 4624 case DRM_FORMAT_ARGB8888: 4625 return PLANE_CTL_FORMAT_XRGB_8888; 4626 case DRM_FORMAT_XBGR2101010: 4627 case DRM_FORMAT_ABGR2101010: 4628 return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX; 4629 case DRM_FORMAT_XRGB2101010: 4630 case DRM_FORMAT_ARGB2101010: 4631 return PLANE_CTL_FORMAT_XRGB_2101010; 4632 case DRM_FORMAT_XBGR16161616F: 4633 case DRM_FORMAT_ABGR16161616F: 4634 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; 4635 case DRM_FORMAT_XRGB16161616F: 4636 case DRM_FORMAT_ARGB16161616F: 4637 return PLANE_CTL_FORMAT_XRGB_16161616F; 4638 case DRM_FORMAT_XYUV8888: 4639 return PLANE_CTL_FORMAT_XYUV; 4640 case DRM_FORMAT_YUYV: 4641 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; 4642 case DRM_FORMAT_YVYU: 4643 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; 4644 case DRM_FORMAT_UYVY: 4645 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; 4646 case DRM_FORMAT_VYUY: 4647 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; 4648 case DRM_FORMAT_NV12: 4649 return PLANE_CTL_FORMAT_NV12; 4650 case DRM_FORMAT_P010: 4651 return PLANE_CTL_FORMAT_P010; 4652 case DRM_FORMAT_P012: 4653 return PLANE_CTL_FORMAT_P012; 4654 case DRM_FORMAT_P016: 4655 return PLANE_CTL_FORMAT_P016; 4656 case DRM_FORMAT_Y210: 4657 return PLANE_CTL_FORMAT_Y210; 4658 case DRM_FORMAT_Y212: 4659 return PLANE_CTL_FORMAT_Y212; 4660 case DRM_FORMAT_Y216: 4661 return PLANE_CTL_FORMAT_Y216; 4662 case DRM_FORMAT_XVYU2101010: 4663 return PLANE_CTL_FORMAT_Y410; 4664 case DRM_FORMAT_XVYU12_16161616: 4665 return PLANE_CTL_FORMAT_Y412; 4666 case DRM_FORMAT_XVYU16161616: 4667 return PLANE_CTL_FORMAT_Y416; 4668 default: 4669 MISSING_CASE(pixel_format); 4670 } 4671 4672 return 0; 4673 } 4674 4675 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) 4676 { 4677 if (!plane_state->hw.fb->format->has_alpha) 4678 return PLANE_CTL_ALPHA_DISABLE; 4679 4680 switch (plane_state->hw.pixel_blend_mode) { 4681 case DRM_MODE_BLEND_PIXEL_NONE: 4682 return PLANE_CTL_ALPHA_DISABLE; 4683 case DRM_MODE_BLEND_PREMULTI: 4684 return PLANE_CTL_ALPHA_SW_PREMULTIPLY; 4685 case DRM_MODE_BLEND_COVERAGE: 4686 return PLANE_CTL_ALPHA_HW_PREMULTIPLY; 4687 default: 4688 MISSING_CASE(plane_state->hw.pixel_blend_mode); 4689 return PLANE_CTL_ALPHA_DISABLE; 4690 } 4691 } 4692 4693 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state) 4694 { 4695 if (!plane_state->hw.fb->format->has_alpha) 4696 return PLANE_COLOR_ALPHA_DISABLE; 4697 4698 
	switch (plane_state->hw.pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_COLOR_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
	default:
		MISSING_CASE(plane_state->hw.pixel_blend_mode);
		return PLANE_COLOR_ALPHA_DISABLE;
	}
}

static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return PLANE_CTL_TILED_Y |
		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
		       PLANE_CTL_CLEAR_COLOR_DISABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}

static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why the swapping.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}

static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}

u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 plane_ctl = 0;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		return plane_ctl;

	if (crtc_state->gamma_enable)
		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;

	return plane_ctl;
}

u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}
4815 4816 plane_ctl |= skl_plane_ctl_format(fb->format->format); 4817 plane_ctl |= skl_plane_ctl_tiling(fb->modifier); 4818 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK); 4819 4820 if (INTEL_GEN(dev_priv) >= 10) 4821 plane_ctl |= cnl_plane_ctl_flip(rotation & 4822 DRM_MODE_REFLECT_MASK); 4823 4824 if (key->flags & I915_SET_COLORKEY_DESTINATION) 4825 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; 4826 else if (key->flags & I915_SET_COLORKEY_SOURCE) 4827 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; 4828 4829 return plane_ctl; 4830 } 4831 4832 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) 4833 { 4834 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 4835 u32 plane_color_ctl = 0; 4836 4837 if (INTEL_GEN(dev_priv) >= 11) 4838 return plane_color_ctl; 4839 4840 if (crtc_state->gamma_enable) 4841 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; 4842 4843 if (crtc_state->csc_enable) 4844 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; 4845 4846 return plane_color_ctl; 4847 } 4848 4849 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, 4850 const struct intel_plane_state *plane_state) 4851 { 4852 struct drm_i915_private *dev_priv = 4853 to_i915(plane_state->uapi.plane->dev); 4854 const struct drm_framebuffer *fb = plane_state->hw.fb; 4855 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 4856 u32 plane_color_ctl = 0; 4857 4858 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; 4859 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); 4860 4861 if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) { 4862 switch (plane_state->hw.color_encoding) { 4863 case DRM_COLOR_YCBCR_BT709: 4864 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; 4865 break; 4866 case DRM_COLOR_YCBCR_BT2020: 4867 plane_color_ctl |= 4868 PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020; 4869 break; 4870 default: 4871 plane_color_ctl |= 4872 PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601; 4873 } 4874 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 4875 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; 4876 } else if (fb->format->is_yuv) { 4877 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; 4878 } 4879 4880 return plane_color_ctl; 4881 } 4882 4883 static int 4884 __intel_display_resume(struct drm_device *dev, 4885 struct drm_atomic_state *state, 4886 struct drm_modeset_acquire_ctx *ctx) 4887 { 4888 struct drm_crtc_state *crtc_state; 4889 struct drm_crtc *crtc; 4890 int i, ret; 4891 4892 intel_modeset_setup_hw_state(dev, ctx); 4893 intel_vga_redisable(to_i915(dev)); 4894 4895 if (!state) 4896 return 0; 4897 4898 /* 4899 * We've duplicated the state, pointers to the old state are invalid. 4900 * 4901 * Don't attempt to use the old state until we commit the duplicated state. 4902 */ 4903 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 4904 /* 4905 * Force recalculation even if we restore 4906 * current state. With fast modeset this may not result 4907 * in a modeset when the state is compatible. 
4908 */ 4909 crtc_state->mode_changed = true; 4910 } 4911 4912 /* ignore any reset values/BIOS leftovers in the WM registers */ 4913 if (!HAS_GMCH(to_i915(dev))) 4914 to_intel_atomic_state(state)->skip_intermediate_wm = true; 4915 4916 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 4917 4918 drm_WARN_ON(dev, ret == -EDEADLK); 4919 return ret; 4920 } 4921 4922 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 4923 { 4924 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && 4925 intel_has_gpu_reset(&dev_priv->gt)); 4926 } 4927 4928 void intel_prepare_reset(struct drm_i915_private *dev_priv) 4929 { 4930 struct drm_device *dev = &dev_priv->drm; 4931 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 4932 struct drm_atomic_state *state; 4933 int ret; 4934 4935 /* reset doesn't touch the display */ 4936 if (!dev_priv->params.force_reset_modeset_test && 4937 !gpu_reset_clobbers_display(dev_priv)) 4938 return; 4939 4940 /* We have a modeset vs reset deadlock, defensively unbreak it. */ 4941 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 4942 smp_mb__after_atomic(); 4943 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET); 4944 4945 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { 4946 drm_dbg_kms(&dev_priv->drm, 4947 "Modeset potentially stuck, unbreaking through wedging\n"); 4948 intel_gt_set_wedged(&dev_priv->gt); 4949 } 4950 4951 /* 4952 * Need mode_config.mutex so that we don't 4953 * trample ongoing ->detect() and whatnot. 4954 */ 4955 mutex_lock(&dev->mode_config.mutex); 4956 drm_modeset_acquire_init(ctx, 0); 4957 while (1) { 4958 ret = drm_modeset_lock_all_ctx(dev, ctx); 4959 if (ret != -EDEADLK) 4960 break; 4961 4962 drm_modeset_backoff(ctx); 4963 } 4964 /* 4965 * Disabling the crtcs gracefully seems nicer. Also the 4966 * g33 docs say we should at least disable all the planes. 4967 */ 4968 state = drm_atomic_helper_duplicate_state(dev, ctx); 4969 if (IS_ERR(state)) { 4970 ret = PTR_ERR(state); 4971 drm_err(&dev_priv->drm, "Duplicating state failed with %i\n", 4972 ret); 4973 return; 4974 } 4975 4976 ret = drm_atomic_helper_disable_all(dev, ctx); 4977 if (ret) { 4978 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n", 4979 ret); 4980 drm_atomic_state_put(state); 4981 return; 4982 } 4983 4984 dev_priv->modeset_restore_state = state; 4985 state->acquire_ctx = ctx; 4986 } 4987 4988 void intel_finish_reset(struct drm_i915_private *dev_priv) 4989 { 4990 struct drm_device *dev = &dev_priv->drm; 4991 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 4992 struct drm_atomic_state *state; 4993 int ret; 4994 4995 /* reset doesn't touch the display */ 4996 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) 4997 return; 4998 4999 state = fetch_and_zero(&dev_priv->modeset_restore_state); 5000 if (!state) 5001 goto unlock; 5002 5003 /* reset doesn't touch the display */ 5004 if (!gpu_reset_clobbers_display(dev_priv)) { 5005 /* for testing only restore the display */ 5006 ret = __intel_display_resume(dev, state, ctx); 5007 if (ret) 5008 drm_err(&dev_priv->drm, 5009 "Restoring old state failed with %i\n", ret); 5010 } else { 5011 /* 5012 * The display has been reset as well, 5013 * so need a full re-initialization. 
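		 * This redoes the PPS workaround, hw state, clock gating
		 * and hotplug setup below.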
5014 */ 5015 intel_pps_unlock_regs_wa(dev_priv); 5016 intel_modeset_init_hw(dev_priv); 5017 intel_init_clock_gating(dev_priv); 5018 5019 spin_lock_irq(&dev_priv->irq_lock); 5020 if (dev_priv->display.hpd_irq_setup) 5021 dev_priv->display.hpd_irq_setup(dev_priv); 5022 spin_unlock_irq(&dev_priv->irq_lock); 5023 5024 ret = __intel_display_resume(dev, state, ctx); 5025 if (ret) 5026 drm_err(&dev_priv->drm, 5027 "Restoring old state failed with %i\n", ret); 5028 5029 intel_hpd_init(dev_priv); 5030 } 5031 5032 drm_atomic_state_put(state); 5033 unlock: 5034 drm_modeset_drop_locks(ctx); 5035 drm_modeset_acquire_fini(ctx); 5036 mutex_unlock(&dev->mode_config.mutex); 5037 5038 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 5039 } 5040 5041 static void icl_set_pipe_chicken(struct intel_crtc *crtc) 5042 { 5043 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5044 enum pipe pipe = crtc->pipe; 5045 u32 tmp; 5046 5047 tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe)); 5048 5049 /* 5050 * Display WA #1153: icl 5051 * enable hardware to bypass the alpha math 5052 * and rounding for per-pixel values 00 and 0xff 5053 */ 5054 tmp |= PER_PIXEL_ALPHA_BYPASS_EN; 5055 /* 5056 * Display WA # 1605353570: icl 5057 * Set the pixel rounding bit to 1 for allowing 5058 * passthrough of Frame buffer pixels unmodified 5059 * across pipe 5060 */ 5061 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; 5062 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp); 5063 } 5064 5065 static void intel_fdi_normal_train(struct intel_crtc *crtc) 5066 { 5067 struct drm_device *dev = crtc->base.dev; 5068 struct drm_i915_private *dev_priv = to_i915(dev); 5069 enum pipe pipe = crtc->pipe; 5070 i915_reg_t reg; 5071 u32 temp; 5072 5073 /* enable normal train */ 5074 reg = FDI_TX_CTL(pipe); 5075 temp = intel_de_read(dev_priv, reg); 5076 if (IS_IVYBRIDGE(dev_priv)) { 5077 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 5078 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 5079 } else { 5080 temp &= ~FDI_LINK_TRAIN_NONE; 5081 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 5082 } 5083 intel_de_write(dev_priv, reg, temp); 5084 5085 reg = FDI_RX_CTL(pipe); 5086 temp = intel_de_read(dev_priv, reg); 5087 if (HAS_PCH_CPT(dev_priv)) { 5088 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 5089 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 5090 } else { 5091 temp &= ~FDI_LINK_TRAIN_NONE; 5092 temp |= FDI_LINK_TRAIN_NONE; 5093 } 5094 intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 5095 5096 /* wait one idle pattern time */ 5097 intel_de_posting_read(dev_priv, reg); 5098 udelay(1000); 5099 5100 /* IVB wants error correction enabled */ 5101 if (IS_IVYBRIDGE(dev_priv)) 5102 intel_de_write(dev_priv, reg, 5103 intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE); 5104 } 5105 5106 /* The FDI link training functions for ILK/Ibexpeak. 
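 * Training pattern 1 is driven until the RX reports bit lock, then
 * pattern 2 until symbol lock; each result is polled up to five times.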
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/*
	 * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	 * for train result
	 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}

static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint.
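 * Each training pattern is additionally retried across the four
 * vswing/emphasis levels in snb_b_fdi_train_param[] until the RX locks.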
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	 * for train result
	 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/*
	 * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	 * for train result
	 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
intel_de_write(dev_priv, reg, temp); 5437 5438 reg = FDI_RX_CTL(pipe); 5439 temp = intel_de_read(dev_priv, reg); 5440 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 5441 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 5442 intel_de_write(dev_priv, reg, temp); 5443 5444 intel_de_posting_read(dev_priv, reg); 5445 udelay(2); /* should be 1.5us */ 5446 5447 for (i = 0; i < 4; i++) { 5448 reg = FDI_RX_IIR(pipe); 5449 temp = intel_de_read(dev_priv, reg); 5450 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); 5451 5452 if (temp & FDI_RX_SYMBOL_LOCK || 5453 (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) { 5454 intel_de_write(dev_priv, reg, 5455 temp | FDI_RX_SYMBOL_LOCK); 5456 drm_dbg_kms(&dev_priv->drm, 5457 "FDI train 2 done, level %i.\n", 5458 i); 5459 goto train_done; 5460 } 5461 udelay(2); /* should be 1.5us */ 5462 } 5463 if (i == 4) 5464 drm_dbg_kms(&dev_priv->drm, 5465 "FDI train 2 fail on vswing %d\n", j / 2); 5466 } 5467 5468 train_done: 5469 drm_dbg_kms(&dev_priv->drm, "FDI train done.\n"); 5470 } 5471 5472 static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) 5473 { 5474 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 5475 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 5476 enum pipe pipe = intel_crtc->pipe; 5477 i915_reg_t reg; 5478 u32 temp; 5479 5480 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 5481 reg = FDI_RX_CTL(pipe); 5482 temp = intel_de_read(dev_priv, reg); 5483 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 5484 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 5485 temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 5486 intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE); 5487 5488 intel_de_posting_read(dev_priv, reg); 5489 udelay(200); 5490 5491 /* Switch from Rawclk to PCDclk */ 5492 temp = intel_de_read(dev_priv, reg); 5493 intel_de_write(dev_priv, reg, temp | FDI_PCDCLK); 5494 5495 intel_de_posting_read(dev_priv, reg); 5496 udelay(200); 5497 5498 /* Enable CPU FDI TX PLL, always on for Ironlake */ 5499 reg = FDI_TX_CTL(pipe); 5500 temp = intel_de_read(dev_priv, reg); 5501 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 5502 intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE); 5503 5504 intel_de_posting_read(dev_priv, reg); 5505 udelay(100); 5506 } 5507 } 5508 5509 static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc) 5510 { 5511 struct drm_device *dev = intel_crtc->base.dev; 5512 struct drm_i915_private *dev_priv = to_i915(dev); 5513 enum pipe pipe = intel_crtc->pipe; 5514 i915_reg_t reg; 5515 u32 temp; 5516 5517 /* Switch from PCDclk to Rawclk */ 5518 reg = FDI_RX_CTL(pipe); 5519 temp = intel_de_read(dev_priv, reg); 5520 intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK); 5521 5522 /* Disable CPU FDI TX PLL */ 5523 reg = FDI_TX_CTL(pipe); 5524 temp = intel_de_read(dev_priv, reg); 5525 intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE); 5526 5527 intel_de_posting_read(dev_priv, reg); 5528 udelay(100); 5529 5530 reg = FDI_RX_CTL(pipe); 5531 temp = intel_de_read(dev_priv, reg); 5532 intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE); 5533 5534 /* Wait for the clocks to turn off. 
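 * The posting read flushes the FDI_RX_PLL disable to the hardware and
 * the 100 us delay below gives the PLL time to actually spin down.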
*/ 5535 intel_de_posting_read(dev_priv, reg); 5536 udelay(100); 5537 } 5538 5539 static void ilk_fdi_disable(struct intel_crtc *crtc) 5540 { 5541 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5542 enum pipe pipe = crtc->pipe; 5543 i915_reg_t reg; 5544 u32 temp; 5545 5546 /* disable CPU FDI tx and PCH FDI rx */ 5547 reg = FDI_TX_CTL(pipe); 5548 temp = intel_de_read(dev_priv, reg); 5549 intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE); 5550 intel_de_posting_read(dev_priv, reg); 5551 5552 reg = FDI_RX_CTL(pipe); 5553 temp = intel_de_read(dev_priv, reg); 5554 temp &= ~(0x7 << 16); 5555 temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 5556 intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE); 5557 5558 intel_de_posting_read(dev_priv, reg); 5559 udelay(100); 5560 5561 /* Ironlake workaround, disable clock pointer after downing FDI */ 5562 if (HAS_PCH_IBX(dev_priv)) 5563 intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe), 5564 FDI_RX_PHASE_SYNC_POINTER_OVR); 5565 5566 /* still set train pattern 1 */ 5567 reg = FDI_TX_CTL(pipe); 5568 temp = intel_de_read(dev_priv, reg); 5569 temp &= ~FDI_LINK_TRAIN_NONE; 5570 temp |= FDI_LINK_TRAIN_PATTERN_1; 5571 intel_de_write(dev_priv, reg, temp); 5572 5573 reg = FDI_RX_CTL(pipe); 5574 temp = intel_de_read(dev_priv, reg); 5575 if (HAS_PCH_CPT(dev_priv)) { 5576 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 5577 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 5578 } else { 5579 temp &= ~FDI_LINK_TRAIN_NONE; 5580 temp |= FDI_LINK_TRAIN_PATTERN_1; 5581 } 5582 /* BPC in FDI rx is consistent with that in PIPECONF */ 5583 temp &= ~(0x07 << 16); 5584 temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 5585 intel_de_write(dev_priv, reg, temp); 5586 5587 intel_de_posting_read(dev_priv, reg); 5588 udelay(100); 5589 } 5590 5591 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) 5592 { 5593 struct drm_crtc *crtc; 5594 bool cleanup_done; 5595 5596 drm_for_each_crtc(crtc, &dev_priv->drm) { 5597 struct drm_crtc_commit *commit; 5598 spin_lock(&crtc->commit_lock); 5599 commit = list_first_entry_or_null(&crtc->commit_list, 5600 struct drm_crtc_commit, commit_entry); 5601 cleanup_done = commit ? 5602 try_wait_for_completion(&commit->cleanup_done) : true; 5603 spin_unlock(&crtc->commit_lock); 5604 5605 if (cleanup_done) 5606 continue; 5607 5608 drm_crtc_wait_one_vblank(crtc); 5609 5610 return true; 5611 } 5612 5613 return false; 5614 } 5615 5616 void lpt_disable_iclkip(struct drm_i915_private *dev_priv) 5617 { 5618 u32 temp; 5619 5620 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE); 5621 5622 mutex_lock(&dev_priv->sb_lock); 5623 5624 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5625 temp |= SBI_SSCCTL_DISABLE; 5626 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 5627 5628 mutex_unlock(&dev_priv->sb_lock); 5629 } 5630 5631 /* Program iCLKIP clock to the desired frequency */ 5632 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) 5633 { 5634 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5635 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5636 int clock = crtc_state->hw.adjusted_mode.crtc_clock; 5637 u32 divsel, phaseinc, auxdiv, phasedir = 0; 5638 u32 temp; 5639 5640 lpt_disable_iclkip(dev_priv); 5641 5642 /* The iCLK virtual clock root frequency is in MHz, 5643 * but the adjusted_mode->crtc_clock is in KHz.
To get the 5644 * divisors, it is necessary to divide one by another, so we 5645 * convert the virtual clock precision to KHz here for higher 5646 * precision. 5647 */ 5648 for (auxdiv = 0; auxdiv < 2; auxdiv++) { 5649 u32 iclk_virtual_root_freq = 172800 * 1000; 5650 u32 iclk_pi_range = 64; 5651 u32 desired_divisor; 5652 5653 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5654 clock << auxdiv); 5655 divsel = (desired_divisor / iclk_pi_range) - 2; 5656 phaseinc = desired_divisor % iclk_pi_range; 5657 5658 /* 5659 * Near 20MHz is a corner case which is 5660 * out of range for the 7-bit divisor 5661 */ 5662 if (divsel <= 0x7f) 5663 break; 5664 } 5665 5666 /* This should not happen with any sane values */ 5667 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) & 5668 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); 5669 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) & 5670 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 5671 5672 drm_dbg_kms(&dev_priv->drm, 5673 "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 5674 clock, auxdiv, divsel, phasedir, phaseinc); 5675 5676 mutex_lock(&dev_priv->sb_lock); 5677 5678 /* Program SSCDIVINTPHASE6 */ 5679 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5680 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 5681 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 5682 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 5683 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 5684 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 5685 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 5686 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); 5687 5688 /* Program SSCAUXDIV */ 5689 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5690 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 5691 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 5692 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); 5693 5694 /* Enable modulator and associated divider */ 5695 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5696 temp &= ~SBI_SSCCTL_DISABLE; 5697 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 5698 5699 mutex_unlock(&dev_priv->sb_lock); 5700 5701 /* Wait for initialization time */ 5702 udelay(24); 5703 5704 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE); 5705 } 5706 5707 int lpt_get_iclkip(struct drm_i915_private *dev_priv) 5708 { 5709 u32 divsel, phaseinc, auxdiv; 5710 u32 iclk_virtual_root_freq = 172800 * 1000; 5711 u32 iclk_pi_range = 64; 5712 u32 desired_divisor; 5713 u32 temp; 5714 5715 if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) 5716 return 0; 5717 5718 mutex_lock(&dev_priv->sb_lock); 5719 5720 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5721 if (temp & SBI_SSCCTL_DISABLE) { 5722 mutex_unlock(&dev_priv->sb_lock); 5723 return 0; 5724 } 5725 5726 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5727 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> 5728 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; 5729 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> 5730 SBI_SSCDIVINTPHASE_INCVAL_SHIFT; 5731 5732 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5733 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> 5734 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; 5735 5736 mutex_unlock(&dev_priv->sb_lock); 5737 5738 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; 5739 5740 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5741 desired_divisor << auxdiv); 5742 } 5743 5744 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, 
5745 enum pipe pch_transcoder) 5746 { 5747 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5748 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5749 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 5750 5751 intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), 5752 intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); 5753 intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), 5754 intel_de_read(dev_priv, HBLANK(cpu_transcoder))); 5755 intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), 5756 intel_de_read(dev_priv, HSYNC(cpu_transcoder))); 5757 5758 intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), 5759 intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); 5760 intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), 5761 intel_de_read(dev_priv, VBLANK(cpu_transcoder))); 5762 intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), 5763 intel_de_read(dev_priv, VSYNC(cpu_transcoder))); 5764 intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), 5765 intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); 5766 } 5767 5768 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) 5769 { 5770 u32 temp; 5771 5772 temp = intel_de_read(dev_priv, SOUTH_CHICKEN1); 5773 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) 5774 return; 5775 5776 drm_WARN_ON(&dev_priv->drm, 5777 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) & 5778 FDI_RX_ENABLE); 5779 drm_WARN_ON(&dev_priv->drm, 5780 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) & 5781 FDI_RX_ENABLE); 5782 5783 temp &= ~FDI_BC_BIFURCATION_SELECT; 5784 if (enable) 5785 temp |= FDI_BC_BIFURCATION_SELECT; 5786 5787 drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n", 5788 enable ? "en" : "dis"); 5789 intel_de_write(dev_priv, SOUTH_CHICKEN1, temp); 5790 intel_de_posting_read(dev_priv, SOUTH_CHICKEN1); 5791 } 5792 5793 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) 5794 { 5795 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5796 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5797 5798 switch (crtc->pipe) { 5799 case PIPE_A: 5800 break; 5801 case PIPE_B: 5802 if (crtc_state->fdi_lanes > 2) 5803 cpt_set_fdi_bc_bifurcation(dev_priv, false); 5804 else 5805 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5806 5807 break; 5808 case PIPE_C: 5809 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5810 5811 break; 5812 default: 5813 BUG(); 5814 } 5815 } 5816 5817 /* 5818 * Finds the encoder associated with the given CRTC. This can only be 5819 * used when we know that the CRTC isn't feeding multiple encoders! 
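 * (With DP-MST a CRTC can feed several encoders, so callers such as the
 * PCH DP path below must only use this on non-MST outputs; the drm_WARN
 * in the body catches any caller that violates this assumption.)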
5820 */ 5821 static struct intel_encoder * 5822 intel_get_crtc_new_encoder(const struct intel_atomic_state *state, 5823 const struct intel_crtc_state *crtc_state) 5824 { 5825 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5826 const struct drm_connector_state *connector_state; 5827 const struct drm_connector *connector; 5828 struct intel_encoder *encoder = NULL; 5829 int num_encoders = 0; 5830 int i; 5831 5832 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 5833 if (connector_state->crtc != &crtc->base) 5834 continue; 5835 5836 encoder = to_intel_encoder(connector_state->best_encoder); 5837 num_encoders++; 5838 } 5839 5840 drm_WARN(encoder->base.dev, num_encoders != 1, 5841 "%d encoders for pipe %c\n", 5842 num_encoders, pipe_name(crtc->pipe)); 5843 5844 return encoder; 5845 } 5846 5847 /* 5848 * Enable PCH resources required for PCH ports: 5849 * - PCH PLLs 5850 * - FDI training & RX/TX 5851 * - update transcoder timings 5852 * - DP transcoding bits 5853 * - transcoder 5854 */ 5855 static void ilk_pch_enable(const struct intel_atomic_state *state, 5856 const struct intel_crtc_state *crtc_state) 5857 { 5858 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5859 struct drm_device *dev = crtc->base.dev; 5860 struct drm_i915_private *dev_priv = to_i915(dev); 5861 enum pipe pipe = crtc->pipe; 5862 u32 temp; 5863 5864 assert_pch_transcoder_disabled(dev_priv, pipe); 5865 5866 if (IS_IVYBRIDGE(dev_priv)) 5867 ivb_update_fdi_bc_bifurcation(crtc_state); 5868 5869 /* Write the TU size bits before fdi link training, so that error 5870 * detection works. */ 5871 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), 5872 intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 5873 5874 /* For PCH output, train the FDI link */ 5875 dev_priv->display.fdi_link_train(crtc, crtc_state); 5876 5877 /* We need to program the right clock selection before writing the pixel 5878 * multiplier into the DPLL. */ 5879 if (HAS_PCH_CPT(dev_priv)) { 5880 u32 sel; 5881 5882 temp = intel_de_read(dev_priv, PCH_DPLL_SEL); 5883 temp |= TRANS_DPLL_ENABLE(pipe); 5884 sel = TRANS_DPLLB_SEL(pipe); 5885 if (crtc_state->shared_dpll == 5886 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) 5887 temp |= sel; 5888 else 5889 temp &= ~sel; 5890 intel_de_write(dev_priv, PCH_DPLL_SEL, temp); 5891 } 5892 5893 /* XXX: pch pll's can be enabled any time before we enable the PCH 5894 * transcoder, and we actually should do this to not upset any PCH 5895 * transcoder that already uses the clock when we share it. 5896 * 5897 * Note that enable_shared_dpll tries to do the right thing, but 5898 * get_shared_dpll unconditionally resets the pll - we need that to have 5899 * the right LVDS enable sequence.
*/ 5900 intel_enable_shared_dpll(crtc_state); 5901 5902 /* set transcoder timing, panel must allow it */ 5903 assert_panel_unlocked(dev_priv, pipe); 5904 ilk_pch_transcoder_set_timings(crtc_state, pipe); 5905 5906 intel_fdi_normal_train(crtc); 5907 5908 /* For PCH DP, enable TRANS_DP_CTL */ 5909 if (HAS_PCH_CPT(dev_priv) && 5910 intel_crtc_has_dp_encoder(crtc_state)) { 5911 const struct drm_display_mode *adjusted_mode = 5912 &crtc_state->hw.adjusted_mode; 5913 u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 5914 i915_reg_t reg = TRANS_DP_CTL(pipe); 5915 enum port port; 5916 5917 temp = intel_de_read(dev_priv, reg); 5918 temp &= ~(TRANS_DP_PORT_SEL_MASK | 5919 TRANS_DP_SYNC_MASK | 5920 TRANS_DP_BPC_MASK); 5921 temp |= TRANS_DP_OUTPUT_ENABLE; 5922 temp |= bpc << 9; /* same format but at 11:9 */ 5923 5924 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 5925 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 5926 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 5927 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 5928 5929 port = intel_get_crtc_new_encoder(state, crtc_state)->port; 5930 drm_WARN_ON(dev, port < PORT_B || port > PORT_D); 5931 temp |= TRANS_DP_PORT_SEL(port); 5932 5933 intel_de_write(dev_priv, reg, temp); 5934 } 5935 5936 ilk_enable_pch_transcoder(crtc_state); 5937 } 5938 5939 void lpt_pch_enable(const struct intel_crtc_state *crtc_state) 5940 { 5941 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5942 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5943 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 5944 5945 assert_pch_transcoder_disabled(dev_priv, PIPE_A); 5946 5947 lpt_program_iclkip(crtc_state); 5948 5949 /* Set transcoder timing. */ 5950 ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); 5951 5952 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 5953 } 5954 5955 static void cpt_verify_modeset(struct drm_i915_private *dev_priv, 5956 enum pipe pipe) 5957 { 5958 i915_reg_t dslreg = PIPEDSL(pipe); 5959 u32 temp; 5960 5961 temp = intel_de_read(dev_priv, dslreg); 5962 udelay(500); 5963 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) { 5964 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) 5965 drm_err(&dev_priv->drm, 5966 "mode set failed: pipe %c stuck\n", 5967 pipe_name(pipe)); 5968 } 5969 } 5970 5971 /* 5972 * The hardware phase 0.0 refers to the center of the pixel. 5973 * We want to start from the top/left edge which is phase 5974 * -0.5. That matches how the hardware calculates the scaling 5975 * factors (from top-left of the first pixel to bottom-right 5976 * of the last pixel, as opposed to the pixel centers). 5977 * 5978 * For 4:2:0 subsampled chroma planes we obviously have to 5979 * adjust that so that the chroma sample position lands in 5980 * the right spot. 5981 * 5982 * Note that for packed YCbCr 4:2:2 formats there is no way to 5983 * control chroma siting. The hardware simply replicates the 5984 * chroma samples for both of the luma samples, and thus we don't 5985 * actually get the expected MPEG2 chroma siting convention :( 5986 * The same behaviour is observed on pre-SKL platforms as well. 
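 * As a worked example of the formula below, with .16 fixed-point scale
 * factors, sub = 1 and no chroma cositing: 4:1 downscaling has
 * scale = 4.0, so phase = -0.5 + 4.0/2 = 1.5; 1:4 upscaling has
 * scale = 0.25, so phase = -0.5 + 0.25/2 = -0.375. Both match the
 * initial phases in the diagrams below.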
5987 * 5988 * Theory behind the formula (note that we ignore sub-pixel 5989 * source coordinates): 5990 * s = source sample position 5991 * d = destination sample position 5992 * 5993 * Downscaling 4:1: 5994 * -0.5 5995 * | 0.0 5996 * | | 1.5 (initial phase) 5997 * | | | 5998 * v v v 5999 * | s | s | s | s | 6000 * | d | 6001 * 6002 * Upscaling 1:4: 6003 * -0.5 6004 * | -0.375 (initial phase) 6005 * | | 0.0 6006 * | | | 6007 * v v v 6008 * | s | 6009 * | d | d | d | d | 6010 */ 6011 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited) 6012 { 6013 int phase = -0x8000; 6014 u16 trip = 0; 6015 6016 if (chroma_cosited) 6017 phase += (sub - 1) * 0x8000 / sub; 6018 6019 phase += scale / (2 * sub); 6020 6021 /* 6022 * Hardware initial phase limited to [-0.5:1.5]. 6023 * Since the max hardware scale factor is 3.0, we 6024 * should never actually exceed 1.0 here. 6025 */ 6026 WARN_ON(phase < -0x8000 || phase > 0x18000); 6027 6028 if (phase < 0) 6029 phase = 0x10000 + phase; 6030 else 6031 trip = PS_PHASE_TRIP; 6032 6033 return ((phase >> 2) & PS_PHASE_MASK) | trip; 6034 } 6035 6036 #define SKL_MIN_SRC_W 8 6037 #define SKL_MAX_SRC_W 4096 6038 #define SKL_MIN_SRC_H 8 6039 #define SKL_MAX_SRC_H 4096 6040 #define SKL_MIN_DST_W 8 6041 #define SKL_MAX_DST_W 4096 6042 #define SKL_MIN_DST_H 8 6043 #define SKL_MAX_DST_H 4096 6044 #define ICL_MAX_SRC_W 5120 6045 #define ICL_MAX_SRC_H 4096 6046 #define ICL_MAX_DST_W 5120 6047 #define ICL_MAX_DST_H 4096 6048 #define SKL_MIN_YUV_420_SRC_W 16 6049 #define SKL_MIN_YUV_420_SRC_H 16 6050 6051 static int 6052 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 6053 unsigned int scaler_user, int *scaler_id, 6054 int src_w, int src_h, int dst_w, int dst_h, 6055 const struct drm_format_info *format, 6056 u64 modifier, bool need_scaler) 6057 { 6058 struct intel_crtc_scaler_state *scaler_state = 6059 &crtc_state->scaler_state; 6060 struct intel_crtc *intel_crtc = 6061 to_intel_crtc(crtc_state->uapi.crtc); 6062 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 6063 const struct drm_display_mode *adjusted_mode = 6064 &crtc_state->hw.adjusted_mode; 6065 6066 /* 6067 * Src coordinates are already rotated by 270 degrees for 6068 * the 90/270 degree plane rotation cases (to match the 6069 * GTT mapping), hence no need to account for rotation here. 6070 */ 6071 if (src_w != dst_w || src_h != dst_h) 6072 need_scaler = true; 6073 6074 /* 6075 * Scaling/fitting not supported in IF-ID mode in GEN9+ 6076 * TODO: Interlace fetch mode doesn't support YUV420 planar formats. 6077 * Once NV12 is enabled, handle it here while allocating scaler 6078 * for NV12. 6079 */ 6080 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable && 6081 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 6082 drm_dbg_kms(&dev_priv->drm, 6083 "Pipe/Plane scaling not supported with IF-ID mode\n"); 6084 return -EINVAL; 6085 } 6086 6087 /* 6088 * if the plane is being disabled, the scaler is no longer required, or 6089 * we are force detaching - free the scaler bound to this plane/crtc 6090 * - in order to do this, update crtc->scaler_usage 6091 * 6092 * Here scaler state in crtc_state is set free so that 6093 * scaler can be assigned to another user. Actual register 6094 * update to free the scaler is done in plane/panel-fit programming. 6095 * For this purpose crtc/plane_state->scaler_id isn't reset here.
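 * E.g. clearing bit 'scaler_user' in scaler_users below only stages the
 * release; the scaler registers themselves are cleared later (see
 * skl_detach_scaler()).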
6096 */ 6097 if (force_detach || !need_scaler) { 6098 if (*scaler_id >= 0) { 6099 scaler_state->scaler_users &= ~(1 << scaler_user); 6100 scaler_state->scalers[*scaler_id].in_use = 0; 6101 6102 drm_dbg_kms(&dev_priv->drm, 6103 "scaler_user index %u.%u: " 6104 "Staged freeing scaler id %d scaler_users = 0x%x\n", 6105 intel_crtc->pipe, scaler_user, *scaler_id, 6106 scaler_state->scaler_users); 6107 *scaler_id = -1; 6108 } 6109 return 0; 6110 } 6111 6112 if (format && intel_format_info_is_yuv_semiplanar(format, modifier) && 6113 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { 6114 drm_dbg_kms(&dev_priv->drm, 6115 "Planar YUV: src dimensions not met\n"); 6116 return -EINVAL; 6117 } 6118 6119 /* range checks */ 6120 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || 6121 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || 6122 (INTEL_GEN(dev_priv) >= 11 && 6123 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || 6124 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || 6125 (INTEL_GEN(dev_priv) < 11 && 6126 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || 6127 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { 6128 drm_dbg_kms(&dev_priv->drm, 6129 "scaler_user index %u.%u: src %ux%u dst %ux%u " 6130 "size is out of scaler range\n", 6131 intel_crtc->pipe, scaler_user, src_w, src_h, 6132 dst_w, dst_h); 6133 return -EINVAL; 6134 } 6135 6136 /* mark this plane as a scaler user in crtc_state */ 6137 scaler_state->scaler_users |= (1 << scaler_user); 6138 drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: " 6139 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", 6140 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, 6141 scaler_state->scaler_users); 6142 6143 return 0; 6144 } 6145 6146 static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state) 6147 { 6148 const struct drm_display_mode *adjusted_mode = 6149 &crtc_state->hw.adjusted_mode; 6150 int width, height; 6151 6152 if (crtc_state->pch_pfit.enabled) { 6153 width = drm_rect_width(&crtc_state->pch_pfit.dst); 6154 height = drm_rect_height(&crtc_state->pch_pfit.dst); 6155 } else { 6156 width = adjusted_mode->crtc_hdisplay; 6157 height = adjusted_mode->crtc_vdisplay; 6158 } 6159 6160 return skl_update_scaler(crtc_state, !crtc_state->hw.active, 6161 SKL_CRTC_INDEX, 6162 &crtc_state->scaler_state.scaler_id, 6163 crtc_state->pipe_src_w, crtc_state->pipe_src_h, 6164 width, height, NULL, 0, 6165 crtc_state->pch_pfit.enabled); 6166 } 6167 6168 /** 6169 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 6170 * @crtc_state: crtc's scaler state 6171 * @plane_state: atomic plane state to update 6172 * 6173 * Return 6174 * 0 - scaler_usage updated successfully 6175 * error - requested scaling cannot be supported or other error condition 6176 */ 6177 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 6178 struct intel_plane_state *plane_state) 6179 { 6180 struct intel_plane *intel_plane = 6181 to_intel_plane(plane_state->uapi.plane); 6182 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 6183 struct drm_framebuffer *fb = plane_state->hw.fb; 6184 int ret; 6185 bool force_detach = !fb || !plane_state->uapi.visible; 6186 bool need_scaler = false; 6187 6188 /* Pre-gen11 and SDR planes always need a scaler for planar formats. 
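 * Such planes have no dedicated chroma upsampler, so a pipe scaler has to
 * perform the 4:2:0 chroma upsampling; ICL+ HDR planes handle it
 * themselves, hence the icl_is_hdr_plane() check below.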
*/ 6189 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && 6190 fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) 6191 need_scaler = true; 6192 6193 ret = skl_update_scaler(crtc_state, force_detach, 6194 drm_plane_index(&intel_plane->base), 6195 &plane_state->scaler_id, 6196 drm_rect_width(&plane_state->uapi.src) >> 16, 6197 drm_rect_height(&plane_state->uapi.src) >> 16, 6198 drm_rect_width(&plane_state->uapi.dst), 6199 drm_rect_height(&plane_state->uapi.dst), 6200 fb ? fb->format : NULL, 6201 fb ? fb->modifier : 0, 6202 need_scaler); 6203 6204 if (ret || plane_state->scaler_id < 0) 6205 return ret; 6206 6207 /* check colorkey */ 6208 if (plane_state->ckey.flags) { 6209 drm_dbg_kms(&dev_priv->drm, 6210 "[PLANE:%d:%s] scaling with color key not allowed", 6211 intel_plane->base.base.id, 6212 intel_plane->base.name); 6213 return -EINVAL; 6214 } 6215 6216 /* Check src format */ 6217 switch (fb->format->format) { 6218 case DRM_FORMAT_RGB565: 6219 case DRM_FORMAT_XBGR8888: 6220 case DRM_FORMAT_XRGB8888: 6221 case DRM_FORMAT_ABGR8888: 6222 case DRM_FORMAT_ARGB8888: 6223 case DRM_FORMAT_XRGB2101010: 6224 case DRM_FORMAT_XBGR2101010: 6225 case DRM_FORMAT_ARGB2101010: 6226 case DRM_FORMAT_ABGR2101010: 6227 case DRM_FORMAT_YUYV: 6228 case DRM_FORMAT_YVYU: 6229 case DRM_FORMAT_UYVY: 6230 case DRM_FORMAT_VYUY: 6231 case DRM_FORMAT_NV12: 6232 case DRM_FORMAT_XYUV8888: 6233 case DRM_FORMAT_P010: 6234 case DRM_FORMAT_P012: 6235 case DRM_FORMAT_P016: 6236 case DRM_FORMAT_Y210: 6237 case DRM_FORMAT_Y212: 6238 case DRM_FORMAT_Y216: 6239 case DRM_FORMAT_XVYU2101010: 6240 case DRM_FORMAT_XVYU12_16161616: 6241 case DRM_FORMAT_XVYU16161616: 6242 break; 6243 case DRM_FORMAT_XBGR16161616F: 6244 case DRM_FORMAT_ABGR16161616F: 6245 case DRM_FORMAT_XRGB16161616F: 6246 case DRM_FORMAT_ARGB16161616F: 6247 if (INTEL_GEN(dev_priv) >= 11) 6248 break; 6249 fallthrough; 6250 default: 6251 drm_dbg_kms(&dev_priv->drm, 6252 "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", 6253 intel_plane->base.base.id, intel_plane->base.name, 6254 fb->base.id, fb->format->format); 6255 return -EINVAL; 6256 } 6257 6258 return 0; 6259 } 6260 6261 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state) 6262 { 6263 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 6264 int i; 6265 6266 for (i = 0; i < crtc->num_scalers; i++) 6267 skl_detach_scaler(crtc, i); 6268 } 6269 6270 static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) 6271 { 6272 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6273 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6274 const struct intel_crtc_scaler_state *scaler_state = 6275 &crtc_state->scaler_state; 6276 struct drm_rect src = { 6277 .x2 = crtc_state->pipe_src_w << 16, 6278 .y2 = crtc_state->pipe_src_h << 16, 6279 }; 6280 const struct drm_rect *dst = &crtc_state->pch_pfit.dst; 6281 u16 uv_rgb_hphase, uv_rgb_vphase; 6282 enum pipe pipe = crtc->pipe; 6283 int width = drm_rect_width(dst); 6284 int height = drm_rect_height(dst); 6285 int x = dst->x1; 6286 int y = dst->y1; 6287 int hscale, vscale; 6288 unsigned long irqflags; 6289 int id; 6290 6291 if (!crtc_state->pch_pfit.enabled) 6292 return; 6293 6294 if (drm_WARN_ON(&dev_priv->drm, 6295 crtc_state->scaler_state.scaler_id < 0)) 6296 return; 6297 6298 hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX); 6299 vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX); 6300 6301 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); 6302 uv_rgb_vphase = 
skl_scaler_calc_phase(1, vscale, false); 6303 6304 id = scaler_state->scaler_id; 6305 6306 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 6307 6308 intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 6309 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 6310 intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id), 6311 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); 6312 intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id), 6313 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); 6314 intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id), 6315 x << 16 | y); 6316 intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id), 6317 width << 16 | height); 6318 6319 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 6320 } 6321 6322 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) 6323 { 6324 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6325 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6326 const struct drm_rect *dst = &crtc_state->pch_pfit.dst; 6327 enum pipe pipe = crtc->pipe; 6328 int width = drm_rect_width(dst); 6329 int height = drm_rect_height(dst); 6330 int x = dst->x1; 6331 int y = dst->y1; 6332 6333 if (!crtc_state->pch_pfit.enabled) 6334 return; 6335 6336 /* Force use of hard-coded filter coefficients 6337 * as some pre-programmed values are broken, 6338 * e.g. x201. 6339 */ 6340 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 6341 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE | 6342 PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe)); 6343 else 6344 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE | 6345 PF_FILTER_MED_3x3); 6346 intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y); 6347 intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height); 6348 } 6349 6350 void hsw_enable_ips(const struct intel_crtc_state *crtc_state) 6351 { 6352 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6353 struct drm_device *dev = crtc->base.dev; 6354 struct drm_i915_private *dev_priv = to_i915(dev); 6355 6356 if (!crtc_state->ips_enabled) 6357 return; 6358 6359 /* 6360 * We can only enable IPS after we enable a plane and wait for a vblank. 6361 * This function is called from post_plane_update, which is run after 6362 * a vblank wait. 6363 */ 6364 drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); 6365 6366 if (IS_BROADWELL(dev_priv)) { 6367 drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 6368 IPS_ENABLE | IPS_PCODE_CONTROL)); 6369 /* Quoting Art Runyan: "it's not safe to expect any particular 6370 * value in IPS_CTL bit 31 after enabling IPS through the 6371 * mailbox." Moreover, the mailbox may return a bogus state, 6372 * so we need to just enable it and continue on. 6373 */ 6374 } else { 6375 intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE); 6376 /* The bit only becomes 1 in the next vblank, so this wait here 6377 * is essentially intel_wait_for_vblank. If we don't have this 6378 * and don't wait for vblanks until the end of crtc_enable, then 6379 * the HW state readout code will complain that the expected 6380 * IPS_CTL value is not the one we read.
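 * The 50 ms timeout below is generous; the bit should flip at the next
 * vblank, i.e. within ~17 ms for a 60 Hz mode.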
*/ 6381 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50)) 6382 drm_err(&dev_priv->drm, 6383 "Timed out waiting for IPS enable\n"); 6384 } 6385 } 6386 6387 void hsw_disable_ips(const struct intel_crtc_state *crtc_state) 6388 { 6389 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6390 struct drm_device *dev = crtc->base.dev; 6391 struct drm_i915_private *dev_priv = to_i915(dev); 6392 6393 if (!crtc_state->ips_enabled) 6394 return; 6395 6396 if (IS_BROADWELL(dev_priv)) { 6397 drm_WARN_ON(dev, 6398 sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 6399 /* 6400 * Wait for PCODE to finish disabling IPS. The BSpec specified 6401 * 42ms timeout value leads to occasional timeouts, so use 100ms 6402 * instead. 6403 */ 6404 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100)) 6405 drm_err(&dev_priv->drm, 6406 "Timed out waiting for IPS disable\n"); 6407 } else { 6408 intel_de_write(dev_priv, IPS_CTL, 0); 6409 intel_de_posting_read(dev_priv, IPS_CTL); 6410 } 6411 6412 /* We need to wait for a vblank before we can disable the plane. */ 6413 intel_wait_for_vblank(dev_priv, crtc->pipe); 6414 } 6415 6416 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) 6417 { 6418 if (intel_crtc->overlay) 6419 (void) intel_overlay_switch_off(intel_crtc->overlay); 6420 6421 /* Let userspace switch the overlay on again. In most cases userspace 6422 * has to recompute where to put it anyway. 6423 */ 6424 } 6425 6426 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state, 6427 const struct intel_crtc_state *new_crtc_state) 6428 { 6429 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 6430 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6431 6432 if (!old_crtc_state->ips_enabled) 6433 return false; 6434 6435 if (needs_modeset(new_crtc_state)) 6436 return true; 6437 6438 /* 6439 * Workaround: Do not read or write the pipe palette/gamma data while 6440 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 6441 * 6442 * Disable IPS before we program the LUT. 6443 */ 6444 if (IS_HASWELL(dev_priv) && 6445 (new_crtc_state->uapi.color_mgmt_changed || 6446 new_crtc_state->update_pipe) && 6447 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 6448 return true; 6449 6450 return !new_crtc_state->ips_enabled; 6451 } 6452 6453 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state, 6454 const struct intel_crtc_state *new_crtc_state) 6455 { 6456 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 6457 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6458 6459 if (!new_crtc_state->ips_enabled) 6460 return false; 6461 6462 if (needs_modeset(new_crtc_state)) 6463 return true; 6464 6465 /* 6466 * Workaround: Do not read or write the pipe palette/gamma data while 6467 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 6468 * 6469 * Re-enable IPS after the LUT has been programmed. 6470 */ 6471 if (IS_HASWELL(dev_priv) && 6472 (new_crtc_state->uapi.color_mgmt_changed || 6473 new_crtc_state->update_pipe) && 6474 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 6475 return true; 6476 6477 /* 6478 * We can't read out IPS on broadwell, assume the worst and 6479 * forcibly enable IPS on the first fastset.
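 * (old_crtc_state->inherited marks state taken over from the BIOS/GOP
 * during the initial hardware readout, so this only affects the first
 * fastset after takeover.)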
6480 */ 6481 if (new_crtc_state->update_pipe && old_crtc_state->inherited) 6482 return true; 6483 6484 return !old_crtc_state->ips_enabled; 6485 } 6486 6487 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state) 6488 { 6489 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 6490 6491 if (!crtc_state->nv12_planes) 6492 return false; 6493 6494 /* WA Display #0827: Gen9:all */ 6495 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) 6496 return true; 6497 6498 return false; 6499 } 6500 6501 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state) 6502 { 6503 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 6504 6505 /* Wa_2006604312:icl,ehl */ 6506 if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11)) 6507 return true; 6508 6509 return false; 6510 } 6511 6512 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, 6513 const struct intel_crtc_state *new_crtc_state) 6514 { 6515 return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) && 6516 new_crtc_state->active_planes; 6517 } 6518 6519 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state, 6520 const struct intel_crtc_state *new_crtc_state) 6521 { 6522 return old_crtc_state->active_planes && 6523 (!new_crtc_state->active_planes || needs_modeset(new_crtc_state)); 6524 } 6525 6526 static void intel_post_plane_update(struct intel_atomic_state *state, 6527 struct intel_crtc *crtc) 6528 { 6529 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6530 const struct intel_crtc_state *old_crtc_state = 6531 intel_atomic_get_old_crtc_state(state, crtc); 6532 const struct intel_crtc_state *new_crtc_state = 6533 intel_atomic_get_new_crtc_state(state, crtc); 6534 enum pipe pipe = crtc->pipe; 6535 6536 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits); 6537 6538 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active) 6539 intel_update_watermarks(crtc); 6540 6541 if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state)) 6542 hsw_enable_ips(new_crtc_state); 6543 6544 intel_fbc_post_update(state, crtc); 6545 6546 if (needs_nv12_wa(old_crtc_state) && 6547 !needs_nv12_wa(new_crtc_state)) 6548 skl_wa_827(dev_priv, pipe, false); 6549 6550 if (needs_scalerclk_wa(old_crtc_state) && 6551 !needs_scalerclk_wa(new_crtc_state)) 6552 icl_wa_scalerclkgating(dev_priv, pipe, false); 6553 } 6554 6555 static void intel_pre_plane_update(struct intel_atomic_state *state, 6556 struct intel_crtc *crtc) 6557 { 6558 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6559 const struct intel_crtc_state *old_crtc_state = 6560 intel_atomic_get_old_crtc_state(state, crtc); 6561 const struct intel_crtc_state *new_crtc_state = 6562 intel_atomic_get_new_crtc_state(state, crtc); 6563 enum pipe pipe = crtc->pipe; 6564 6565 if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state)) 6566 hsw_disable_ips(old_crtc_state); 6567 6568 if (intel_fbc_pre_update(state, crtc)) 6569 intel_wait_for_vblank(dev_priv, pipe); 6570 6571 /* Display WA 827 */ 6572 if (!needs_nv12_wa(old_crtc_state) && 6573 needs_nv12_wa(new_crtc_state)) 6574 skl_wa_827(dev_priv, pipe, true); 6575 6576 /* Wa_2006604312:icl,ehl */ 6577 if (!needs_scalerclk_wa(old_crtc_state) && 6578 needs_scalerclk_wa(new_crtc_state)) 6579 icl_wa_scalerclkgating(dev_priv, pipe, true); 6580 6581 /* 6582 * Vblank time updates from the shadow to live plane control register 6583 * are blocked if the memory self-refresh mode is active at that 6584 * 
moment. So to make sure the plane gets truly disabled, first 6585 * disable the self-refresh mode. The self-refresh enable bit in turn 6586 * will be checked/applied by the HW only at the next frame start 6587 * event which is after the vblank start event, so we need to have a 6588 * wait-for-vblank between disabling the plane and the pipe. 6589 */ 6590 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active && 6591 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) 6592 intel_wait_for_vblank(dev_priv, pipe); 6593 6594 /* 6595 * IVB workaround: must disable low power watermarks for at least 6596 * one frame before enabling scaling. LP watermarks can be re-enabled 6597 * when scaling is disabled. 6598 * 6599 * WaCxSRDisabledForSpriteScaling:ivb 6600 */ 6601 if (old_crtc_state->hw.active && 6602 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv)) 6603 intel_wait_for_vblank(dev_priv, pipe); 6604 6605 /* 6606 * If we're doing a modeset we don't need to do any 6607 * pre-vblank watermark programming here. 6608 */ 6609 if (!needs_modeset(new_crtc_state)) { 6610 /* 6611 * For platforms that support atomic watermarks, program the 6612 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these 6613 * will be the intermediate values that are safe for both pre- and 6614 * post- vblank; when vblank happens, the 'active' values will be set 6615 * to the final 'target' values and we'll do this again to get the 6616 * optimal watermarks. For gen9+ platforms, the values we program here 6617 * will be the final target values which will get automatically latched 6618 * at vblank time; no further programming will be necessary. 6619 * 6620 * If a platform hasn't been transitioned to atomic watermarks yet, 6621 * we'll continue to update watermarks the old way, if flags tell 6622 * us to. 6623 */ 6624 if (dev_priv->display.initial_watermarks) 6625 dev_priv->display.initial_watermarks(state, crtc); 6626 else if (new_crtc_state->update_wm_pre) 6627 intel_update_watermarks(crtc); 6628 } 6629 6630 /* 6631 * Gen2 reports pipe underruns whenever all planes are disabled. 6632 * So disable underrun reporting before all the planes get disabled. 6633 * 6634 * We do this after .initial_watermarks() so that we have a 6635 * chance of catching underruns with the intermediate watermarks 6636 * vs. the old plane configuration.
6637 */ 6638 if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state)) 6639 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6640 } 6641 6642 static void intel_crtc_disable_planes(struct intel_atomic_state *state, 6643 struct intel_crtc *crtc) 6644 { 6645 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6646 const struct intel_crtc_state *new_crtc_state = 6647 intel_atomic_get_new_crtc_state(state, crtc); 6648 unsigned int update_mask = new_crtc_state->update_planes; 6649 const struct intel_plane_state *old_plane_state; 6650 struct intel_plane *plane; 6651 unsigned fb_bits = 0; 6652 int i; 6653 6654 intel_crtc_dpms_overlay_disable(crtc); 6655 6656 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 6657 if (crtc->pipe != plane->pipe || 6658 !(update_mask & BIT(plane->id))) 6659 continue; 6660 6661 intel_disable_plane(plane, new_crtc_state); 6662 6663 if (old_plane_state->uapi.visible) 6664 fb_bits |= plane->frontbuffer_bit; 6665 } 6666 6667 intel_frontbuffer_flip(dev_priv, fb_bits); 6668 } 6669 6670 /* 6671 * intel_connector_primary_encoder - get the primary encoder for a connector 6672 * @connector: connector for which to return the encoder 6673 * 6674 * Returns the primary encoder for a connector. There is a 1:1 mapping from 6675 * all connectors to their encoder, except for DP-MST connectors which have 6676 * both a virtual and a primary encoder. These DP-MST primary encoders can be 6677 * pointed to by as many DP-MST connectors as there are pipes. 6678 */ 6679 static struct intel_encoder * 6680 intel_connector_primary_encoder(struct intel_connector *connector) 6681 { 6682 struct intel_encoder *encoder; 6683 6684 if (connector->mst_port) 6685 return &dp_to_dig_port(connector->mst_port)->base; 6686 6687 encoder = intel_attached_encoder(connector); 6688 drm_WARN_ON(connector->base.dev, !encoder); 6689 6690 return encoder; 6691 } 6692 6693 static void intel_encoders_update_prepare(struct intel_atomic_state *state) 6694 { 6695 struct drm_connector_state *new_conn_state; 6696 struct drm_connector *connector; 6697 int i; 6698 6699 for_each_new_connector_in_state(&state->base, connector, new_conn_state, 6700 i) { 6701 struct intel_connector *intel_connector; 6702 struct intel_encoder *encoder; 6703 struct intel_crtc *crtc; 6704 6705 if (!intel_connector_needs_modeset(state, connector)) 6706 continue; 6707 6708 intel_connector = to_intel_connector(connector); 6709 encoder = intel_connector_primary_encoder(intel_connector); 6710 if (!encoder->update_prepare) 6711 continue; 6712 6713 crtc = new_conn_state->crtc ? 6714 to_intel_crtc(new_conn_state->crtc) : NULL; 6715 encoder->update_prepare(state, encoder, crtc); 6716 } 6717 } 6718 6719 static void intel_encoders_update_complete(struct intel_atomic_state *state) 6720 { 6721 struct drm_connector_state *new_conn_state; 6722 struct drm_connector *connector; 6723 int i; 6724 6725 for_each_new_connector_in_state(&state->base, connector, new_conn_state, 6726 i) { 6727 struct intel_connector *intel_connector; 6728 struct intel_encoder *encoder; 6729 struct intel_crtc *crtc; 6730 6731 if (!intel_connector_needs_modeset(state, connector)) 6732 continue; 6733 6734 intel_connector = to_intel_connector(connector); 6735 encoder = intel_connector_primary_encoder(intel_connector); 6736 if (!encoder->update_complete) 6737 continue; 6738 6739 crtc = new_conn_state->crtc ? 
6740 to_intel_crtc(new_conn_state->crtc) : NULL; 6741 encoder->update_complete(state, encoder, crtc); 6742 } 6743 } 6744 6745 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state, 6746 struct intel_crtc *crtc) 6747 { 6748 const struct intel_crtc_state *crtc_state = 6749 intel_atomic_get_new_crtc_state(state, crtc); 6750 const struct drm_connector_state *conn_state; 6751 struct drm_connector *conn; 6752 int i; 6753 6754 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6755 struct intel_encoder *encoder = 6756 to_intel_encoder(conn_state->best_encoder); 6757 6758 if (conn_state->crtc != &crtc->base) 6759 continue; 6760 6761 if (encoder->pre_pll_enable) 6762 encoder->pre_pll_enable(state, encoder, 6763 crtc_state, conn_state); 6764 } 6765 } 6766 6767 static void intel_encoders_pre_enable(struct intel_atomic_state *state, 6768 struct intel_crtc *crtc) 6769 { 6770 const struct intel_crtc_state *crtc_state = 6771 intel_atomic_get_new_crtc_state(state, crtc); 6772 const struct drm_connector_state *conn_state; 6773 struct drm_connector *conn; 6774 int i; 6775 6776 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6777 struct intel_encoder *encoder = 6778 to_intel_encoder(conn_state->best_encoder); 6779 6780 if (conn_state->crtc != &crtc->base) 6781 continue; 6782 6783 if (encoder->pre_enable) 6784 encoder->pre_enable(state, encoder, 6785 crtc_state, conn_state); 6786 } 6787 } 6788 6789 static void intel_encoders_enable(struct intel_atomic_state *state, 6790 struct intel_crtc *crtc) 6791 { 6792 const struct intel_crtc_state *crtc_state = 6793 intel_atomic_get_new_crtc_state(state, crtc); 6794 const struct drm_connector_state *conn_state; 6795 struct drm_connector *conn; 6796 int i; 6797 6798 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6799 struct intel_encoder *encoder = 6800 to_intel_encoder(conn_state->best_encoder); 6801 6802 if (conn_state->crtc != &crtc->base) 6803 continue; 6804 6805 if (encoder->enable) 6806 encoder->enable(state, encoder, 6807 crtc_state, conn_state); 6808 intel_opregion_notify_encoder(encoder, true); 6809 } 6810 } 6811 6812 static void intel_encoders_disable(struct intel_atomic_state *state, 6813 struct intel_crtc *crtc) 6814 { 6815 const struct intel_crtc_state *old_crtc_state = 6816 intel_atomic_get_old_crtc_state(state, crtc); 6817 const struct drm_connector_state *old_conn_state; 6818 struct drm_connector *conn; 6819 int i; 6820 6821 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6822 struct intel_encoder *encoder = 6823 to_intel_encoder(old_conn_state->best_encoder); 6824 6825 if (old_conn_state->crtc != &crtc->base) 6826 continue; 6827 6828 intel_opregion_notify_encoder(encoder, false); 6829 if (encoder->disable) 6830 encoder->disable(state, encoder, 6831 old_crtc_state, old_conn_state); 6832 } 6833 } 6834 6835 static void intel_encoders_post_disable(struct intel_atomic_state *state, 6836 struct intel_crtc *crtc) 6837 { 6838 const struct intel_crtc_state *old_crtc_state = 6839 intel_atomic_get_old_crtc_state(state, crtc); 6840 const struct drm_connector_state *old_conn_state; 6841 struct drm_connector *conn; 6842 int i; 6843 6844 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6845 struct intel_encoder *encoder = 6846 to_intel_encoder(old_conn_state->best_encoder); 6847 6848 if (old_conn_state->crtc != &crtc->base) 6849 continue; 6850 6851 if (encoder->post_disable) 6852 encoder->post_disable(state, encoder, 6853 old_crtc_state, 
old_conn_state); 6854 } 6855 } 6856 6857 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state, 6858 struct intel_crtc *crtc) 6859 { 6860 const struct intel_crtc_state *old_crtc_state = 6861 intel_atomic_get_old_crtc_state(state, crtc); 6862 const struct drm_connector_state *old_conn_state; 6863 struct drm_connector *conn; 6864 int i; 6865 6866 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6867 struct intel_encoder *encoder = 6868 to_intel_encoder(old_conn_state->best_encoder); 6869 6870 if (old_conn_state->crtc != &crtc->base) 6871 continue; 6872 6873 if (encoder->post_pll_disable) 6874 encoder->post_pll_disable(state, encoder, 6875 old_crtc_state, old_conn_state); 6876 } 6877 } 6878 6879 static void intel_encoders_update_pipe(struct intel_atomic_state *state, 6880 struct intel_crtc *crtc) 6881 { 6882 const struct intel_crtc_state *crtc_state = 6883 intel_atomic_get_new_crtc_state(state, crtc); 6884 const struct drm_connector_state *conn_state; 6885 struct drm_connector *conn; 6886 int i; 6887 6888 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6889 struct intel_encoder *encoder = 6890 to_intel_encoder(conn_state->best_encoder); 6891 6892 if (conn_state->crtc != &crtc->base) 6893 continue; 6894 6895 if (encoder->update_pipe) 6896 encoder->update_pipe(state, encoder, 6897 crtc_state, conn_state); 6898 } 6899 } 6900 6901 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) 6902 { 6903 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6904 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 6905 6906 plane->disable_plane(plane, crtc_state); 6907 } 6908 6909 static void ilk_crtc_enable(struct intel_atomic_state *state, 6910 struct intel_crtc *crtc) 6911 { 6912 const struct intel_crtc_state *new_crtc_state = 6913 intel_atomic_get_new_crtc_state(state, crtc); 6914 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6915 enum pipe pipe = crtc->pipe; 6916 6917 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 6918 return; 6919 6920 /* 6921 * Sometimes spurious CPU pipe underruns happen during FDI 6922 * training, at least with VGA+HDMI cloning. Suppress them. 6923 * 6924 * On ILK we get occasional spurious CPU pipe underruns 6925 * between eDP port A enable and vdd enable. Also PCH port 6926 * enable seems to result in the occasional CPU pipe underrun. 6927 * 6928 * Spurious PCH underruns also occur during PCH enabling. 6929 */ 6930 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6931 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 6932 6933 if (new_crtc_state->has_pch_encoder) 6934 intel_prepare_shared_dpll(new_crtc_state); 6935 6936 if (intel_crtc_has_dp_encoder(new_crtc_state)) 6937 intel_dp_set_m_n(new_crtc_state, M1_N1); 6938 6939 intel_set_pipe_timings(new_crtc_state); 6940 intel_set_pipe_src_size(new_crtc_state); 6941 6942 if (new_crtc_state->has_pch_encoder) 6943 intel_cpu_transcoder_set_m_n(new_crtc_state, 6944 &new_crtc_state->fdi_m_n, NULL); 6945 6946 ilk_set_pipeconf(new_crtc_state); 6947 6948 crtc->active = true; 6949 6950 intel_encoders_pre_enable(state, crtc); 6951 6952 if (new_crtc_state->has_pch_encoder) { 6953 /* Note: FDI PLL enabling _must_ be done before we enable the 6954 * cpu pipes, hence this is separate from all the other fdi/pch 6955 * enabling.
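 * ilk_fdi_pll_enable() also waits out the FDI RX PLL warmup (~200 us
 * plus DMI latency) before switching from Rawclk to PCDclk.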
*/ 6956 ilk_fdi_pll_enable(new_crtc_state); 6957 } else { 6958 assert_fdi_tx_disabled(dev_priv, pipe); 6959 assert_fdi_rx_disabled(dev_priv, pipe); 6960 } 6961 6962 ilk_pfit_enable(new_crtc_state); 6963 6964 /* 6965 * On ILK+ LUT must be loaded before the pipe is running but with 6966 * clocks enabled 6967 */ 6968 intel_color_load_luts(new_crtc_state); 6969 intel_color_commit(new_crtc_state); 6970 /* update DSPCNTR to configure gamma for pipe bottom color */ 6971 intel_disable_primary_plane(new_crtc_state); 6972 6973 if (dev_priv->display.initial_watermarks) 6974 dev_priv->display.initial_watermarks(state, crtc); 6975 intel_enable_pipe(new_crtc_state); 6976 6977 if (new_crtc_state->has_pch_encoder) 6978 ilk_pch_enable(state, new_crtc_state); 6979 6980 intel_crtc_vblank_on(new_crtc_state); 6981 6982 intel_encoders_enable(state, crtc); 6983 6984 if (HAS_PCH_CPT(dev_priv)) 6985 cpt_verify_modeset(dev_priv, pipe); 6986 6987 /* 6988 * Must wait for vblank to avoid spurious PCH FIFO underruns. 6989 * And a second vblank wait is needed at least on ILK with 6990 * some interlaced HDMI modes. Let's do the double wait always 6991 * in case there are more corner cases we don't know about. 6992 */ 6993 if (new_crtc_state->has_pch_encoder) { 6994 intel_wait_for_vblank(dev_priv, pipe); 6995 intel_wait_for_vblank(dev_priv, pipe); 6996 } 6997 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6998 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 6999 } 7000 7001 /* IPS only exists on ULT machines and is tied to pipe A. */ 7002 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 7003 { 7004 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; 7005 } 7006 7007 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, 7008 enum pipe pipe, bool apply) 7009 { 7010 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)); 7011 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; 7012 7013 if (apply) 7014 val |= mask; 7015 else 7016 val &= ~mask; 7017 7018 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val); 7019 } 7020 7021 static void icl_pipe_mbus_enable(struct intel_crtc *crtc) 7022 { 7023 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7024 enum pipe pipe = crtc->pipe; 7025 u32 val; 7026 7027 val = MBUS_DBOX_A_CREDIT(2); 7028 7029 if (INTEL_GEN(dev_priv) >= 12) { 7030 val |= MBUS_DBOX_BW_CREDIT(2); 7031 val |= MBUS_DBOX_B_CREDIT(12); 7032 } else { 7033 val |= MBUS_DBOX_BW_CREDIT(1); 7034 val |= MBUS_DBOX_B_CREDIT(8); 7035 } 7036 7037 intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val); 7038 } 7039 7040 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state) 7041 { 7042 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7043 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7044 7045 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe), 7046 HSW_LINETIME(crtc_state->linetime) | 7047 HSW_IPS_LINETIME(crtc_state->ips_linetime)); 7048 } 7049 7050 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) 7051 { 7052 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7053 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7054 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder); 7055 u32 val; 7056 7057 val = intel_de_read(dev_priv, reg); 7058 val &= ~HSW_FRAME_START_DELAY_MASK; 7059 val |= HSW_FRAME_START_DELAY(0); 7060 intel_de_write(dev_priv, reg, val); 7061 } 7062 7063 static void hsw_crtc_enable(struct intel_atomic_state 
*state, 7064 struct intel_crtc *crtc) 7065 { 7066 const struct intel_crtc_state *new_crtc_state = 7067 intel_atomic_get_new_crtc_state(state, crtc); 7068 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7069 enum pipe pipe = crtc->pipe, hsw_workaround_pipe; 7070 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 7071 bool psl_clkgate_wa; 7072 7073 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 7074 return; 7075 7076 intel_encoders_pre_pll_enable(state, crtc); 7077 7078 if (new_crtc_state->shared_dpll) 7079 intel_enable_shared_dpll(new_crtc_state); 7080 7081 intel_encoders_pre_enable(state, crtc); 7082 7083 if (!transcoder_is_dsi(cpu_transcoder)) 7084 intel_set_pipe_timings(new_crtc_state); 7085 7086 intel_set_pipe_src_size(new_crtc_state); 7087 7088 if (cpu_transcoder != TRANSCODER_EDP && 7089 !transcoder_is_dsi(cpu_transcoder)) 7090 intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), 7091 new_crtc_state->pixel_multiplier - 1); 7092 7093 if (new_crtc_state->has_pch_encoder) 7094 intel_cpu_transcoder_set_m_n(new_crtc_state, 7095 &new_crtc_state->fdi_m_n, NULL); 7096 7097 if (!transcoder_is_dsi(cpu_transcoder)) { 7098 hsw_set_frame_start_delay(new_crtc_state); 7099 hsw_set_pipeconf(new_crtc_state); 7100 } 7101 7102 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 7103 bdw_set_pipemisc(new_crtc_state); 7104 7105 crtc->active = true; 7106 7107 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ 7108 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && 7109 new_crtc_state->pch_pfit.enabled; 7110 if (psl_clkgate_wa) 7111 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); 7112 7113 if (INTEL_GEN(dev_priv) >= 9) 7114 skl_pfit_enable(new_crtc_state); 7115 else 7116 ilk_pfit_enable(new_crtc_state); 7117 7118 /* 7119 * On ILK+ LUT must be loaded before the pipe is running but with 7120 * clocks enabled 7121 */ 7122 intel_color_load_luts(new_crtc_state); 7123 intel_color_commit(new_crtc_state); 7124 /* update DSPCNTR to configure gamma/csc for pipe bottom color */ 7125 if (INTEL_GEN(dev_priv) < 9) 7126 intel_disable_primary_plane(new_crtc_state); 7127 7128 hsw_set_linetime_wm(new_crtc_state); 7129 7130 if (INTEL_GEN(dev_priv) >= 11) 7131 icl_set_pipe_chicken(crtc); 7132 7133 if (dev_priv->display.initial_watermarks) 7134 dev_priv->display.initial_watermarks(state, crtc); 7135 7136 if (INTEL_GEN(dev_priv) >= 11) 7137 icl_pipe_mbus_enable(crtc); 7138 7139 intel_encoders_enable(state, crtc); 7140 7141 if (psl_clkgate_wa) { 7142 intel_wait_for_vblank(dev_priv, pipe); 7143 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); 7144 } 7145 7146 /* If we change the relative order between pipe/planes enabling, we need 7147 * to change the workaround. */ 7148 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe; 7149 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 7150 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 7151 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 7152 } 7153 } 7154 7155 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) 7156 { 7157 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 7158 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7159 enum pipe pipe = crtc->pipe; 7160 7161 /* To avoid upsetting the power well on haswell, only disable the pfit if 7162 * it's in use. The hw state code will make sure we get this right.
*/ 7163 if (!old_crtc_state->pch_pfit.enabled) 7164 return; 7165 7166 intel_de_write(dev_priv, PF_CTL(pipe), 0); 7167 intel_de_write(dev_priv, PF_WIN_POS(pipe), 0); 7168 intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0); 7169 } 7170 7171 static void ilk_crtc_disable(struct intel_atomic_state *state, 7172 struct intel_crtc *crtc) 7173 { 7174 const struct intel_crtc_state *old_crtc_state = 7175 intel_atomic_get_old_crtc_state(state, crtc); 7176 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7177 enum pipe pipe = crtc->pipe; 7178 7179 /* 7180 * Sometimes spurious CPU pipe underruns happen when the 7181 * pipe is already disabled, but FDI RX/TX is still enabled. 7182 * Happens at least with VGA+HDMI cloning. Suppress them. 7183 */ 7184 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 7185 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 7186 7187 intel_encoders_disable(state, crtc); 7188 7189 intel_crtc_vblank_off(old_crtc_state); 7190 7191 intel_disable_pipe(old_crtc_state); 7192 7193 ilk_pfit_disable(old_crtc_state); 7194 7195 if (old_crtc_state->has_pch_encoder) 7196 ilk_fdi_disable(crtc); 7197 7198 intel_encoders_post_disable(state, crtc); 7199 7200 if (old_crtc_state->has_pch_encoder) { 7201 ilk_disable_pch_transcoder(dev_priv, pipe); 7202 7203 if (HAS_PCH_CPT(dev_priv)) { 7204 i915_reg_t reg; 7205 u32 temp; 7206 7207 /* disable TRANS_DP_CTL */ 7208 reg = TRANS_DP_CTL(pipe); 7209 temp = intel_de_read(dev_priv, reg); 7210 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 7211 TRANS_DP_PORT_SEL_MASK); 7212 temp |= TRANS_DP_PORT_SEL_NONE; 7213 intel_de_write(dev_priv, reg, temp); 7214 7215 /* disable DPLL_SEL */ 7216 temp = intel_de_read(dev_priv, PCH_DPLL_SEL); 7217 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 7218 intel_de_write(dev_priv, PCH_DPLL_SEL, temp); 7219 } 7220 7221 ilk_fdi_pll_disable(crtc); 7222 } 7223 7224 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 7225 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 7226 } 7227 7228 static void hsw_crtc_disable(struct intel_atomic_state *state, 7229 struct intel_crtc *crtc) 7230 { 7231 /* 7232 * FIXME collapse everything to one hook. 7233 * Need care with mst->ddi interactions. 7234 */ 7235 intel_encoders_disable(state, crtc); 7236 intel_encoders_post_disable(state, crtc); 7237 } 7238 7239 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) 7240 { 7241 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7242 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7243 7244 if (!crtc_state->gmch_pfit.control) 7245 return; 7246 7247 /* 7248 * The panel fitter should only be adjusted whilst the pipe is disabled, 7249 * according to register description and PRM. 7250 */ 7251 drm_WARN_ON(&dev_priv->drm, 7252 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE); 7253 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); 7254 7255 intel_de_write(dev_priv, PFIT_PGM_RATIOS, 7256 crtc_state->gmch_pfit.pgm_ratios); 7257 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control); 7258 7259 /* Border color in case we don't scale up to the full screen. Black by 7260 * default, change to something else for debugging. 
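 * (BCLRPAT takes a border color value; assuming the usual 8:8:8 RGB layout,
 * e.g. writing 0x00ff0000 instead of 0 below would paint the unscaled border
 * bright red, which makes panel fitter borders easy to spot while debugging.)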
*/ 7261 intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0); 7262 } 7263 7264 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) 7265 { 7266 if (phy == PHY_NONE) 7267 return false; 7268 else if (IS_ROCKETLAKE(dev_priv)) 7269 return phy <= PHY_D; 7270 else if (IS_ELKHARTLAKE(dev_priv)) 7271 return phy <= PHY_C; 7272 else if (INTEL_GEN(dev_priv) >= 11) 7273 return phy <= PHY_B; 7274 else 7275 return false; 7276 } 7277 7278 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) 7279 { 7280 if (IS_ROCKETLAKE(dev_priv)) 7281 return false; 7282 else if (INTEL_GEN(dev_priv) >= 12) 7283 return phy >= PHY_D && phy <= PHY_I; 7284 else if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) 7285 return phy >= PHY_C && phy <= PHY_F; 7286 else 7287 return false; 7288 } 7289 7290 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) 7291 { 7292 if (IS_ROCKETLAKE(i915) && port >= PORT_D) 7293 return (enum phy)port - 1; 7294 else if (IS_ELKHARTLAKE(i915) && port == PORT_D) 7295 return PHY_A; 7296 7297 return (enum phy)port; 7298 } 7299 7300 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) 7301 { 7302 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) 7303 return PORT_TC_NONE; 7304 7305 if (INTEL_GEN(dev_priv) >= 12) 7306 return port - PORT_D; 7307 7308 return port - PORT_C; 7309 } 7310 7311 enum intel_display_power_domain intel_port_to_power_domain(enum port port) 7312 { 7313 switch (port) { 7314 case PORT_A: 7315 return POWER_DOMAIN_PORT_DDI_A_LANES; 7316 case PORT_B: 7317 return POWER_DOMAIN_PORT_DDI_B_LANES; 7318 case PORT_C: 7319 return POWER_DOMAIN_PORT_DDI_C_LANES; 7320 case PORT_D: 7321 return POWER_DOMAIN_PORT_DDI_D_LANES; 7322 case PORT_E: 7323 return POWER_DOMAIN_PORT_DDI_E_LANES; 7324 case PORT_F: 7325 return POWER_DOMAIN_PORT_DDI_F_LANES; 7326 case PORT_G: 7327 return POWER_DOMAIN_PORT_DDI_G_LANES; 7328 case PORT_H: 7329 return POWER_DOMAIN_PORT_DDI_H_LANES; 7330 case PORT_I: 7331 return POWER_DOMAIN_PORT_DDI_I_LANES; 7332 default: 7333 MISSING_CASE(port); 7334 return POWER_DOMAIN_PORT_OTHER; 7335 } 7336 } 7337 7338 enum intel_display_power_domain 7339 intel_aux_power_domain(struct intel_digital_port *dig_port) 7340 { 7341 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 7342 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 7343 7344 if (intel_phy_is_tc(dev_priv, phy) && 7345 dig_port->tc_mode == TC_PORT_TBT_ALT) { 7346 switch (dig_port->aux_ch) { 7347 case AUX_CH_C: 7348 return POWER_DOMAIN_AUX_C_TBT; 7349 case AUX_CH_D: 7350 return POWER_DOMAIN_AUX_D_TBT; 7351 case AUX_CH_E: 7352 return POWER_DOMAIN_AUX_E_TBT; 7353 case AUX_CH_F: 7354 return POWER_DOMAIN_AUX_F_TBT; 7355 case AUX_CH_G: 7356 return POWER_DOMAIN_AUX_G_TBT; 7357 case AUX_CH_H: 7358 return POWER_DOMAIN_AUX_H_TBT; 7359 case AUX_CH_I: 7360 return POWER_DOMAIN_AUX_I_TBT; 7361 default: 7362 MISSING_CASE(dig_port->aux_ch); 7363 return POWER_DOMAIN_AUX_C_TBT; 7364 } 7365 } 7366 7367 return intel_legacy_aux_to_power_domain(dig_port->aux_ch); 7368 } 7369 7370 /* 7371 * Converts aux_ch to power_domain without caring about TBT ports; for those, 7372 * use intel_aux_power_domain() 7373 */ 7374 enum intel_display_power_domain 7375 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch) 7376 { 7377 switch (aux_ch) { 7378 case AUX_CH_A: 7379 return POWER_DOMAIN_AUX_A; 7380 case AUX_CH_B: 7381 return POWER_DOMAIN_AUX_B; 7382 case AUX_CH_C: 7383 return POWER_DOMAIN_AUX_C; 7384 case AUX_CH_D: 7385 return
POWER_DOMAIN_AUX_D; 7386 case AUX_CH_E: 7387 return POWER_DOMAIN_AUX_E; 7388 case AUX_CH_F: 7389 return POWER_DOMAIN_AUX_F; 7390 case AUX_CH_G: 7391 return POWER_DOMAIN_AUX_G; 7392 case AUX_CH_H: 7393 return POWER_DOMAIN_AUX_H; 7394 case AUX_CH_I: 7395 return POWER_DOMAIN_AUX_I; 7396 default: 7397 MISSING_CASE(aux_ch); 7398 return POWER_DOMAIN_AUX_A; 7399 } 7400 } 7401 7402 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) 7403 { 7404 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7405 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7406 struct drm_encoder *encoder; 7407 enum pipe pipe = crtc->pipe; 7408 u64 mask; 7409 enum transcoder transcoder = crtc_state->cpu_transcoder; 7410 7411 if (!crtc_state->hw.active) 7412 return 0; 7413 7414 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe)); 7415 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder)); 7416 if (crtc_state->pch_pfit.enabled || 7417 crtc_state->pch_pfit.force_thru) 7418 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 7419 7420 drm_for_each_encoder_mask(encoder, &dev_priv->drm, 7421 crtc_state->uapi.encoder_mask) { 7422 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 7423 7424 mask |= BIT_ULL(intel_encoder->power_domain); 7425 } 7426 7427 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 7428 mask |= BIT_ULL(POWER_DOMAIN_AUDIO); 7429 7430 if (crtc_state->shared_dpll) 7431 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE); 7432 7433 return mask; 7434 } 7435 7436 static u64 7437 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state) 7438 { 7439 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7440 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7441 enum intel_display_power_domain domain; 7442 u64 domains, new_domains, old_domains; 7443 7444 old_domains = crtc->enabled_power_domains; 7445 crtc->enabled_power_domains = new_domains = 7446 get_crtc_power_domains(crtc_state); 7447 7448 domains = new_domains & ~old_domains; 7449 7450 for_each_power_domain(domain, domains) 7451 intel_display_power_get(dev_priv, domain); 7452 7453 return old_domains & ~new_domains; 7454 } 7455 7456 static void modeset_put_power_domains(struct drm_i915_private *dev_priv, 7457 u64 domains) 7458 { 7459 enum intel_display_power_domain domain; 7460 7461 for_each_power_domain(domain, domains) 7462 intel_display_power_put_unchecked(dev_priv, domain); 7463 } 7464 7465 static void valleyview_crtc_enable(struct intel_atomic_state *state, 7466 struct intel_crtc *crtc) 7467 { 7468 const struct intel_crtc_state *new_crtc_state = 7469 intel_atomic_get_new_crtc_state(state, crtc); 7470 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7471 enum pipe pipe = crtc->pipe; 7472 7473 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 7474 return; 7475 7476 if (intel_crtc_has_dp_encoder(new_crtc_state)) 7477 intel_dp_set_m_n(new_crtc_state, M1_N1); 7478 7479 intel_set_pipe_timings(new_crtc_state); 7480 intel_set_pipe_src_size(new_crtc_state); 7481 7482 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 7483 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY); 7484 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0); 7485 } 7486 7487 i9xx_set_pipeconf(new_crtc_state); 7488 7489 crtc->active = true; 7490 7491 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 7492 7493 intel_encoders_pre_pll_enable(state, crtc); 7494 7495 if (IS_CHERRYVIEW(dev_priv)) { 7496 chv_prepare_pll(crtc, new_crtc_state); 7497 chv_enable_pll(crtc, new_crtc_state); 7498 } else { 7499 
vlv_prepare_pll(crtc, new_crtc_state); 7500 vlv_enable_pll(crtc, new_crtc_state); 7501 } 7502 7503 intel_encoders_pre_enable(state, crtc); 7504 7505 i9xx_pfit_enable(new_crtc_state); 7506 7507 intel_color_load_luts(new_crtc_state); 7508 intel_color_commit(new_crtc_state); 7509 /* update DSPCNTR to configure gamma for pipe bottom color */ 7510 intel_disable_primary_plane(new_crtc_state); 7511 7512 dev_priv->display.initial_watermarks(state, crtc); 7513 intel_enable_pipe(new_crtc_state); 7514 7515 intel_crtc_vblank_on(new_crtc_state); 7516 7517 intel_encoders_enable(state, crtc); 7518 } 7519 7520 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) 7521 { 7522 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7523 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7524 7525 intel_de_write(dev_priv, FP0(crtc->pipe), 7526 crtc_state->dpll_hw_state.fp0); 7527 intel_de_write(dev_priv, FP1(crtc->pipe), 7528 crtc_state->dpll_hw_state.fp1); 7529 } 7530 7531 static void i9xx_crtc_enable(struct intel_atomic_state *state, 7532 struct intel_crtc *crtc) 7533 { 7534 const struct intel_crtc_state *new_crtc_state = 7535 intel_atomic_get_new_crtc_state(state, crtc); 7536 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7537 enum pipe pipe = crtc->pipe; 7538 7539 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 7540 return; 7541 7542 i9xx_set_pll_dividers(new_crtc_state); 7543 7544 if (intel_crtc_has_dp_encoder(new_crtc_state)) 7545 intel_dp_set_m_n(new_crtc_state, M1_N1); 7546 7547 intel_set_pipe_timings(new_crtc_state); 7548 intel_set_pipe_src_size(new_crtc_state); 7549 7550 i9xx_set_pipeconf(new_crtc_state); 7551 7552 crtc->active = true; 7553 7554 if (!IS_GEN(dev_priv, 2)) 7555 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 7556 7557 intel_encoders_pre_enable(state, crtc); 7558 7559 i9xx_enable_pll(crtc, new_crtc_state); 7560 7561 i9xx_pfit_enable(new_crtc_state); 7562 7563 intel_color_load_luts(new_crtc_state); 7564 intel_color_commit(new_crtc_state); 7565 /* update DSPCNTR to configure gamma for pipe bottom color */ 7566 intel_disable_primary_plane(new_crtc_state); 7567 7568 if (dev_priv->display.initial_watermarks) 7569 dev_priv->display.initial_watermarks(state, crtc); 7570 else 7571 intel_update_watermarks(crtc); 7572 intel_enable_pipe(new_crtc_state); 7573 7574 intel_crtc_vblank_on(new_crtc_state); 7575 7576 intel_encoders_enable(state, crtc); 7577 7578 /* prevents spurious underruns */ 7579 if (IS_GEN(dev_priv, 2)) 7580 intel_wait_for_vblank(dev_priv, pipe); 7581 } 7582 7583 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) 7584 { 7585 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 7586 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7587 7588 if (!old_crtc_state->gmch_pfit.control) 7589 return; 7590 7591 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 7592 7593 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n", 7594 intel_de_read(dev_priv, PFIT_CONTROL)); 7595 intel_de_write(dev_priv, PFIT_CONTROL, 0); 7596 } 7597 7598 static void i9xx_crtc_disable(struct intel_atomic_state *state, 7599 struct intel_crtc *crtc) 7600 { 7601 struct intel_crtc_state *old_crtc_state = 7602 intel_atomic_get_old_crtc_state(state, crtc); 7603 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7604 enum pipe pipe = crtc->pipe; 7605 7606 /* 7607 * On gen2 planes are double buffered but the pipe isn't, so we must 7608 * wait for planes to fully turn off 
before disabling the pipe. 7609 */ 7610 if (IS_GEN(dev_priv, 2)) 7611 intel_wait_for_vblank(dev_priv, pipe); 7612 7613 intel_encoders_disable(state, crtc); 7614 7615 intel_crtc_vblank_off(old_crtc_state); 7616 7617 intel_disable_pipe(old_crtc_state); 7618 7619 i9xx_pfit_disable(old_crtc_state); 7620 7621 intel_encoders_post_disable(state, crtc); 7622 7623 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { 7624 if (IS_CHERRYVIEW(dev_priv)) 7625 chv_disable_pll(dev_priv, pipe); 7626 else if (IS_VALLEYVIEW(dev_priv)) 7627 vlv_disable_pll(dev_priv, pipe); 7628 else 7629 i9xx_disable_pll(old_crtc_state); 7630 } 7631 7632 intel_encoders_post_pll_disable(state, crtc); 7633 7634 if (!IS_GEN(dev_priv, 2)) 7635 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 7636 7637 if (!dev_priv->display.initial_watermarks) 7638 intel_update_watermarks(crtc); 7639 7640 /* clock the pipe down to 640x480@60 to potentially save power */ 7641 if (IS_I830(dev_priv)) 7642 i830_enable_pipe(dev_priv, pipe); 7643 } 7644 7645 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, 7646 struct drm_modeset_acquire_ctx *ctx) 7647 { 7648 struct intel_encoder *encoder; 7649 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7650 struct intel_bw_state *bw_state = 7651 to_intel_bw_state(dev_priv->bw_obj.state); 7652 struct intel_cdclk_state *cdclk_state = 7653 to_intel_cdclk_state(dev_priv->cdclk.obj.state); 7654 struct intel_dbuf_state *dbuf_state = 7655 to_intel_dbuf_state(dev_priv->dbuf.obj.state); 7656 struct intel_crtc_state *crtc_state = 7657 to_intel_crtc_state(crtc->base.state); 7658 enum intel_display_power_domain domain; 7659 struct intel_plane *plane; 7660 struct drm_atomic_state *state; 7661 struct intel_crtc_state *temp_crtc_state; 7662 enum pipe pipe = crtc->pipe; 7663 u64 domains; 7664 int ret; 7665 7666 if (!crtc_state->hw.active) 7667 return; 7668 7669 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 7670 const struct intel_plane_state *plane_state = 7671 to_intel_plane_state(plane->base.state); 7672 7673 if (plane_state->uapi.visible) 7674 intel_plane_disable_noatomic(crtc, plane); 7675 } 7676 7677 state = drm_atomic_state_alloc(&dev_priv->drm); 7678 if (!state) { 7679 drm_dbg_kms(&dev_priv->drm, 7680 "failed to disable [CRTC:%d:%s], out of memory", 7681 crtc->base.base.id, crtc->base.name); 7682 return; 7683 } 7684 7685 state->acquire_ctx = ctx; 7686 7687 /* Everything's already locked, -EDEADLK can't happen. 
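 * The caller's acquire ctx, installed on the state above, already holds
 * all the locks these lookups might take.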
*/ 7688 temp_crtc_state = intel_atomic_get_crtc_state(state, crtc); 7689 ret = drm_atomic_add_affected_connectors(state, &crtc->base); 7690 7691 drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret); 7692 7693 dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc); 7694 7695 drm_atomic_state_put(state); 7696 7697 drm_dbg_kms(&dev_priv->drm, 7698 "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", 7699 crtc->base.base.id, crtc->base.name); 7700 7701 crtc->active = false; 7702 crtc->base.enabled = false; 7703 7704 drm_WARN_ON(&dev_priv->drm, 7705 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0); 7706 crtc_state->uapi.active = false; 7707 crtc_state->uapi.connector_mask = 0; 7708 crtc_state->uapi.encoder_mask = 0; 7709 intel_crtc_free_hw_state(crtc_state); 7710 memset(&crtc_state->hw, 0, sizeof(crtc_state->hw)); 7711 7712 for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder) 7713 encoder->base.crtc = NULL; 7714 7715 intel_fbc_disable(crtc); 7716 intel_update_watermarks(crtc); 7717 intel_disable_shared_dpll(crtc_state); 7718 7719 domains = crtc->enabled_power_domains; 7720 for_each_power_domain(domain, domains) 7721 intel_display_power_put_unchecked(dev_priv, domain); 7722 crtc->enabled_power_domains = 0; 7723 7724 dev_priv->active_pipes &= ~BIT(pipe); 7725 cdclk_state->min_cdclk[pipe] = 0; 7726 cdclk_state->min_voltage_level[pipe] = 0; 7727 cdclk_state->active_pipes &= ~BIT(pipe); 7728 7729 dbuf_state->active_pipes &= ~BIT(pipe); 7730 7731 bw_state->data_rate[pipe] = 0; 7732 bw_state->num_active_planes[pipe] = 0; 7733 } 7734 7735 /* 7736 * Turn all CRTCs off, but do not adjust state. 7737 * This has to be paired with a call to intel_modeset_setup_hw_state. 7738 */ 7739 int intel_display_suspend(struct drm_device *dev) 7740 { 7741 struct drm_i915_private *dev_priv = to_i915(dev); 7742 struct drm_atomic_state *state; 7743 int ret; 7744 7745 state = drm_atomic_helper_suspend(dev); 7746 ret = PTR_ERR_OR_ZERO(state); 7747 if (ret) 7748 drm_err(&dev_priv->drm, "Suspending CRTCs failed with %i\n", 7749 ret); 7750 else 7751 dev_priv->modeset_restore_state = state; 7752 return ret; 7753 } 7754 7755 void intel_encoder_destroy(struct drm_encoder *encoder) 7756 { 7757 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 7758 7759 drm_encoder_cleanup(encoder); 7760 kfree(intel_encoder); 7761 } 7762 7763 /* Cross check the actual hw state with our own modeset state tracking (and its 7764 * internal consistency).
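 * Verified below: the connector<->encoder<->crtc linkage, and that the
 * active/enabled flags on each side agree with the hw readout.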
*/ 7765 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, 7766 struct drm_connector_state *conn_state) 7767 { 7768 struct intel_connector *connector = to_intel_connector(conn_state->connector); 7769 struct drm_i915_private *i915 = to_i915(connector->base.dev); 7770 7771 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n", 7772 connector->base.base.id, connector->base.name); 7773 7774 if (connector->get_hw_state(connector)) { 7775 struct intel_encoder *encoder = intel_attached_encoder(connector); 7776 7777 I915_STATE_WARN(!crtc_state, 7778 "connector enabled without attached crtc\n"); 7779 7780 if (!crtc_state) 7781 return; 7782 7783 I915_STATE_WARN(!crtc_state->hw.active, 7784 "connector is active, but attached crtc isn't\n"); 7785 7786 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) 7787 return; 7788 7789 I915_STATE_WARN(conn_state->best_encoder != &encoder->base, 7790 "atomic encoder doesn't match attached encoder\n"); 7791 7792 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, 7793 "attached encoder crtc differs from connector crtc\n"); 7794 } else { 7795 I915_STATE_WARN(crtc_state && crtc_state->hw.active, 7796 "attached crtc is active, but connector isn't\n"); 7797 I915_STATE_WARN(!crtc_state && conn_state->best_encoder, 7798 "best encoder set without crtc!\n"); 7799 } 7800 } 7801 7802 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 7803 { 7804 if (crtc_state->hw.enable && crtc_state->has_pch_encoder) 7805 return crtc_state->fdi_lanes; 7806 7807 return 0; 7808 } 7809 7810 static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, 7811 struct intel_crtc_state *pipe_config) 7812 { 7813 struct drm_i915_private *dev_priv = to_i915(dev); 7814 struct drm_atomic_state *state = pipe_config->uapi.state; 7815 struct intel_crtc *other_crtc; 7816 struct intel_crtc_state *other_crtc_state; 7817 7818 drm_dbg_kms(&dev_priv->drm, 7819 "checking fdi config on pipe %c, lanes %i\n", 7820 pipe_name(pipe), pipe_config->fdi_lanes); 7821 if (pipe_config->fdi_lanes > 4) { 7822 drm_dbg_kms(&dev_priv->drm, 7823 "invalid fdi lane config on pipe %c: %i lanes\n", 7824 pipe_name(pipe), pipe_config->fdi_lanes); 7825 return -EINVAL; 7826 } 7827 7828 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 7829 if (pipe_config->fdi_lanes > 2) { 7830 drm_dbg_kms(&dev_priv->drm, 7831 "only 2 lanes on haswell, required: %i lanes\n", 7832 pipe_config->fdi_lanes); 7833 return -EINVAL; 7834 } else { 7835 return 0; 7836 } 7837 } 7838 7839 if (INTEL_NUM_PIPES(dev_priv) == 2) 7840 return 0; 7841 7842 /* Ivybridge 3 pipe is really complicated */ 7843 switch (pipe) { 7844 case PIPE_A: 7845 return 0; 7846 case PIPE_B: 7847 if (pipe_config->fdi_lanes <= 2) 7848 return 0; 7849 7850 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C); 7851 other_crtc_state = 7852 intel_atomic_get_crtc_state(state, other_crtc); 7853 if (IS_ERR(other_crtc_state)) 7854 return PTR_ERR(other_crtc_state); 7855 7856 if (pipe_required_fdi_lanes(other_crtc_state) > 0) { 7857 drm_dbg_kms(&dev_priv->drm, 7858 "invalid shared fdi lane config on pipe %c: %i lanes\n", 7859 pipe_name(pipe), pipe_config->fdi_lanes); 7860 return -EINVAL; 7861 } 7862 return 0; 7863 case PIPE_C: 7864 if (pipe_config->fdi_lanes > 2) { 7865 drm_dbg_kms(&dev_priv->drm, 7866 "only 2 lanes on pipe %c: required %i lanes\n", 7867 pipe_name(pipe), pipe_config->fdi_lanes); 7868 return -EINVAL; 7869 } 7870 7871 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B); 7872 other_crtc_state = 7873 
intel_atomic_get_crtc_state(state, other_crtc); 7874 if (IS_ERR(other_crtc_state)) 7875 return PTR_ERR(other_crtc_state); 7876 7877 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 7878 drm_dbg_kms(&dev_priv->drm, 7879 "fdi link B uses too many lanes to enable link C\n"); 7880 return -EINVAL; 7881 } 7882 return 0; 7883 default: 7884 BUG(); 7885 } 7886 } 7887 7888 #define RETRY 1 7889 static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc, 7890 struct intel_crtc_state *pipe_config) 7891 { 7892 struct drm_device *dev = intel_crtc->base.dev; 7893 struct drm_i915_private *i915 = to_i915(dev); 7894 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 7895 int lane, link_bw, fdi_dotclock, ret; 7896 bool needs_recompute = false; 7897 7898 retry: 7899 /* FDI is a binary signal running at ~2.7GHz, encoding 7900 * each output octet as 10 bits. The actual frequency 7901 * is stored as a divider into a 100MHz clock, and the 7902 * mode pixel clock is stored in units of 1KHz. 7903 * Hence the bw of each lane in terms of the mode signal 7904 * is: 7905 */ 7906 link_bw = intel_fdi_link_freq(i915, pipe_config); 7907 7908 fdi_dotclock = adjusted_mode->crtc_clock; 7909 7910 lane = ilk_get_lanes_required(fdi_dotclock, link_bw, 7911 pipe_config->pipe_bpp); 7912 7913 pipe_config->fdi_lanes = lane; 7914 7915 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 7916 link_bw, &pipe_config->fdi_m_n, false, false); 7917 7918 ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 7919 if (ret == -EDEADLK) 7920 return ret; 7921 7922 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 7923 pipe_config->pipe_bpp -= 2*3; 7924 drm_dbg_kms(&i915->drm, 7925 "fdi link bw constraint, reducing pipe bpp to %i\n", 7926 pipe_config->pipe_bpp); 7927 needs_recompute = true; 7928 pipe_config->bw_constrained = true; 7929 7930 goto retry; 7931 } 7932 7933 if (needs_recompute) 7934 return RETRY; 7935 7936 return ret; 7937 } 7938 7939 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) 7940 { 7941 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7942 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7943 7944 /* IPS only exists on ULT machines and is tied to pipe A. */ 7945 if (!hsw_crtc_supports_ips(crtc)) 7946 return false; 7947 7948 if (!dev_priv->params.enable_ips) 7949 return false; 7950 7951 if (crtc_state->pipe_bpp > 24) 7952 return false; 7953 7954 /* 7955 * We compare against max, which means we must take 7956 * the increased cdclk requirement into account when 7957 * calculating the new cdclk. 7958 * 7959 * Should measure whether using a lower cdclk w/o IPS would be a net win. 7960 */ 7961 if (IS_BROADWELL(dev_priv) && 7962 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100) 7963 return false; 7964 7965 return true; 7966 } 7967 7968 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state) 7969 { 7970 struct drm_i915_private *dev_priv = 7971 to_i915(crtc_state->uapi.crtc->dev); 7972 struct intel_atomic_state *state = 7973 to_intel_atomic_state(crtc_state->uapi.state); 7974 7975 crtc_state->ips_enabled = false; 7976 7977 if (!hsw_crtc_state_ips_capable(crtc_state)) 7978 return 0; 7979 7980 /* 7981 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 7982 * enabled and disabled dynamically based on package C states, 7983 * user space can't make reliable use of the CRCs, so let's just 7984 * completely disable it.
7985 */ 7986 if (crtc_state->crc_enabled) 7987 return 0; 7988 7989 /* IPS should be fine as long as at least one plane is enabled. */ 7990 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))) 7991 return 0; 7992 7993 if (IS_BROADWELL(dev_priv)) { 7994 const struct intel_cdclk_state *cdclk_state; 7995 7996 cdclk_state = intel_atomic_get_cdclk_state(state); 7997 if (IS_ERR(cdclk_state)) 7998 return PTR_ERR(cdclk_state); 7999 8000 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 8001 if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100) 8002 return 0; 8003 } 8004 8005 crtc_state->ips_enabled = true; 8006 8007 return 0; 8008 } 8009 8010 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 8011 { 8012 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8013 8014 /* GDG double wide on either pipe, otherwise pipe A only */ 8015 return INTEL_GEN(dev_priv) < 4 && 8016 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 8017 } 8018 8019 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state) 8020 { 8021 u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock; 8022 unsigned int pipe_w, pipe_h, pfit_w, pfit_h; 8023 8024 /* 8025 * We only use IF-ID interlacing. If we ever use 8026 * PF-ID we'll need to adjust the pixel_rate here. 8027 */ 8028 8029 if (!crtc_state->pch_pfit.enabled) 8030 return pixel_rate; 8031 8032 pipe_w = crtc_state->pipe_src_w; 8033 pipe_h = crtc_state->pipe_src_h; 8034 8035 pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst); 8036 pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst); 8037 8038 if (pipe_w < pfit_w) 8039 pipe_w = pfit_w; 8040 if (pipe_h < pfit_h) 8041 pipe_h = pfit_h; 8042 8043 if (drm_WARN_ON(crtc_state->uapi.crtc->dev, 8044 !pfit_w || !pfit_h)) 8045 return pixel_rate; 8046 8047 return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h), 8048 pfit_w * pfit_h); 8049 } 8050 8051 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 8052 { 8053 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 8054 8055 if (HAS_GMCH(dev_priv)) 8056 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 8057 crtc_state->pixel_rate = 8058 crtc_state->hw.adjusted_mode.crtc_clock; 8059 else 8060 crtc_state->pixel_rate = 8061 ilk_pipe_pixel_rate(crtc_state); 8062 } 8063 8064 static int intel_crtc_compute_config(struct intel_crtc *crtc, 8065 struct intel_crtc_state *pipe_config) 8066 { 8067 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8068 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 8069 int clock_limit = dev_priv->max_dotclk_freq; 8070 8071 if (INTEL_GEN(dev_priv) < 4) { 8072 clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 8073 8074 /* 8075 * Enable double wide mode when the dot clock 8076 * is > 90% of the (display) core speed. 
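 * For example (illustrative numbers only): with max_cdclk_freq = 320000 kHz
 * the single wide limit is 288000 kHz, so a 300000 kHz mode flips the pipe
 * into double wide mode and restores the full max_dotclk_freq limit.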
8077 */ 8078 if (intel_crtc_supports_double_wide(crtc) && 8079 adjusted_mode->crtc_clock > clock_limit) { 8080 clock_limit = dev_priv->max_dotclk_freq; 8081 pipe_config->double_wide = true; 8082 } 8083 } 8084 8085 if (adjusted_mode->crtc_clock > clock_limit) { 8086 drm_dbg_kms(&dev_priv->drm, 8087 "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 8088 adjusted_mode->crtc_clock, clock_limit, 8089 yesno(pipe_config->double_wide)); 8090 return -EINVAL; 8091 } 8092 8093 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 8094 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) && 8095 pipe_config->hw.ctm) { 8096 /* 8097 * There is only one pipe CSC unit per pipe, and we need that 8098 * for output conversion from RGB->YCBCR. So if CTM is already 8099 * applied we can't support YCBCR420 output. 8100 */ 8101 drm_dbg_kms(&dev_priv->drm, 8102 "YCBCR420 and CTM together are not possible\n"); 8103 return -EINVAL; 8104 } 8105 8106 /* 8107 * Pipe horizontal size must be even in: 8108 * - DVO ganged mode 8109 * - LVDS dual channel mode 8110 * - Double wide pipe 8111 */ 8112 if (pipe_config->pipe_src_w & 1) { 8113 if (pipe_config->double_wide) { 8114 drm_dbg_kms(&dev_priv->drm, 8115 "Odd pipe source width not supported with double wide pipe\n"); 8116 return -EINVAL; 8117 } 8118 8119 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && 8120 intel_is_dual_link_lvds(dev_priv)) { 8121 drm_dbg_kms(&dev_priv->drm, 8122 "Odd pipe source width not supported with dual link LVDS\n"); 8123 return -EINVAL; 8124 } 8125 } 8126 8127 /* Cantiga+ cannot handle modes with an hsync front porch of 0. 8128 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 8129 */ 8130 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) && 8131 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 8132 return -EINVAL; 8133 8134 intel_crtc_compute_pixel_rate(pipe_config); 8135 8136 if (pipe_config->has_pch_encoder) 8137 return ilk_fdi_compute_config(crtc, pipe_config); 8138 8139 return 0; 8140 } 8141 8142 static void 8143 intel_reduce_m_n_ratio(u32 *num, u32 *den) 8144 { 8145 while (*num > DATA_LINK_M_N_MASK || 8146 *den > DATA_LINK_M_N_MASK) { 8147 *num >>= 1; 8148 *den >>= 1; 8149 } 8150 } 8151 8152 static void compute_m_n(unsigned int m, unsigned int n, 8153 u32 *ret_m, u32 *ret_n, 8154 bool constant_n) 8155 { 8156 /* 8157 * Several DP dongles in particular seem to be fussy about 8158 * too large link M/N values. Give N value as 0x8000, which 8159 * should be acceptable to such devices. 0x8000 is the 8160 * specified fixed N value for asynchronous clock mode, 8161 * which the devices expect also in synchronous clock mode.
8162 */ 8163 if (constant_n) 8164 *ret_n = DP_LINK_CONSTANT_N_VALUE; 8165 else 8166 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 8167 8168 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); 8169 intel_reduce_m_n_ratio(ret_m, ret_n); 8170 } 8171 8172 void 8173 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, 8174 int pixel_clock, int link_clock, 8175 struct intel_link_m_n *m_n, 8176 bool constant_n, bool fec_enable) 8177 { 8178 u32 data_clock = bits_per_pixel * pixel_clock; 8179 8180 if (fec_enable) 8181 data_clock = intel_dp_mode_to_fec_clock(data_clock); 8182 8183 m_n->tu = 64; 8184 compute_m_n(data_clock, 8185 link_clock * nlanes * 8, 8186 &m_n->gmch_m, &m_n->gmch_n, 8187 constant_n); 8188 8189 compute_m_n(pixel_clock, link_clock, 8190 &m_n->link_m, &m_n->link_n, 8191 constant_n); 8192 } 8193 8194 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) 8195 { 8196 /* 8197 * There may be no VBT; and if the BIOS enabled SSC we can 8198 * just keep using it to avoid unnecessary flicker. Whereas if the 8199 * BIOS isn't using it, don't assume it will work even if the VBT 8200 * indicates as much. 8201 */ 8202 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 8203 bool bios_lvds_use_ssc = intel_de_read(dev_priv, 8204 PCH_DREF_CONTROL) & 8205 DREF_SSC1_ENABLE; 8206 8207 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 8208 drm_dbg_kms(&dev_priv->drm, 8209 "SSC %s by BIOS, overriding VBT which says %s\n", 8210 enableddisabled(bios_lvds_use_ssc), 8211 enableddisabled(dev_priv->vbt.lvds_use_ssc)); 8212 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 8213 } 8214 } 8215 } 8216 8217 static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 8218 { 8219 if (dev_priv->params.panel_use_ssc >= 0) 8220 return dev_priv->params.panel_use_ssc != 0; 8221 return dev_priv->vbt.lvds_use_ssc 8222 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 8223 } 8224 8225 static u32 pnv_dpll_compute_fp(struct dpll *dpll) 8226 { 8227 return (1 << dpll->n) << 16 | dpll->m2; 8228 } 8229 8230 static u32 i9xx_dpll_compute_fp(struct dpll *dpll) 8231 { 8232 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 8233 } 8234 8235 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 8236 struct intel_crtc_state *crtc_state, 8237 struct dpll *reduced_clock) 8238 { 8239 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8240 u32 fp, fp2 = 0; 8241 8242 if (IS_PINEVIEW(dev_priv)) { 8243 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 8244 if (reduced_clock) 8245 fp2 = pnv_dpll_compute_fp(reduced_clock); 8246 } else { 8247 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 8248 if (reduced_clock) 8249 fp2 = i9xx_dpll_compute_fp(reduced_clock); 8250 } 8251 8252 crtc_state->dpll_hw_state.fp0 = fp; 8253 8254 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8255 reduced_clock) { 8256 crtc_state->dpll_hw_state.fp1 = fp2; 8257 } else { 8258 crtc_state->dpll_hw_state.fp1 = fp; 8259 } 8260 } 8261 8262 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe 8263 pipe) 8264 { 8265 u32 reg_val; 8266 8267 /* 8268 * PLLB opamp always calibrates to max value of 0x3f, force enable it 8269 * and set it to a reasonable value instead. 
8270 */ 8271 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 8272 reg_val &= 0xffffff00; 8273 reg_val |= 0x00000030; 8274 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 8275 8276 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 8277 reg_val &= 0x00ffffff; 8278 reg_val |= 0x8c000000; 8279 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 8280 8281 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 8282 reg_val &= 0xffffff00; 8283 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 8284 8285 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 8286 reg_val &= 0x00ffffff; 8287 reg_val |= 0xb0000000; 8288 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 8289 } 8290 8291 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 8292 const struct intel_link_m_n *m_n) 8293 { 8294 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8295 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8296 enum pipe pipe = crtc->pipe; 8297 8298 intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe), 8299 TU_SIZE(m_n->tu) | m_n->gmch_m); 8300 intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 8301 intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m); 8302 intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n); 8303 } 8304 8305 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, 8306 enum transcoder transcoder) 8307 { 8308 if (IS_HASWELL(dev_priv)) 8309 return transcoder == TRANSCODER_EDP; 8310 8311 /* 8312 * Strictly speaking some registers are available before 8313 * gen7, but we only support DRRS on gen7+ 8314 */ 8315 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv); 8316 } 8317 8318 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 8319 const struct intel_link_m_n *m_n, 8320 const struct intel_link_m_n *m2_n2) 8321 { 8322 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8323 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8324 enum pipe pipe = crtc->pipe; 8325 enum transcoder transcoder = crtc_state->cpu_transcoder; 8326 8327 if (INTEL_GEN(dev_priv) >= 5) { 8328 intel_de_write(dev_priv, PIPE_DATA_M1(transcoder), 8329 TU_SIZE(m_n->tu) | m_n->gmch_m); 8330 intel_de_write(dev_priv, PIPE_DATA_N1(transcoder), 8331 m_n->gmch_n); 8332 intel_de_write(dev_priv, PIPE_LINK_M1(transcoder), 8333 m_n->link_m); 8334 intel_de_write(dev_priv, PIPE_LINK_N1(transcoder), 8335 m_n->link_n); 8336 /* 8337 * M2_N2 registers are set only if DRRS is supported 8338 * (to make sure the registers are not unnecessarily accessed). 
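 * M2/N2 hold the link ratio for the downclocked DRRS refresh rate; only
 * transcoders that can do seamless DRRS switching have the second register
 * set (see transcoder_has_m2_n2()).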
8339 */ 8340 if (m2_n2 && crtc_state->has_drrs && 8341 transcoder_has_m2_n2(dev_priv, transcoder)) { 8342 intel_de_write(dev_priv, PIPE_DATA_M2(transcoder), 8343 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 8344 intel_de_write(dev_priv, PIPE_DATA_N2(transcoder), 8345 m2_n2->gmch_n); 8346 intel_de_write(dev_priv, PIPE_LINK_M2(transcoder), 8347 m2_n2->link_m); 8348 intel_de_write(dev_priv, PIPE_LINK_N2(transcoder), 8349 m2_n2->link_n); 8350 } 8351 } else { 8352 intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe), 8353 TU_SIZE(m_n->tu) | m_n->gmch_m); 8354 intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 8355 intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m); 8356 intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n); 8357 } 8358 } 8359 8360 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n) 8361 { 8362 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 8363 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 8364 8365 if (m_n == M1_N1) { 8366 dp_m_n = &crtc_state->dp_m_n; 8367 dp_m2_n2 = &crtc_state->dp_m2_n2; 8368 } else if (m_n == M2_N2) { 8369 8370 /* 8371 * M2_N2 registers are not supported. Hence m2_n2 divider value 8372 * needs to be programmed into M1_N1. 8373 */ 8374 dp_m_n = &crtc_state->dp_m2_n2; 8375 } else { 8376 drm_err(&i915->drm, "Unsupported divider value\n"); 8377 return; 8378 } 8379 8380 if (crtc_state->has_pch_encoder) 8381 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n); 8382 else 8383 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2); 8384 } 8385 8386 static void vlv_compute_dpll(struct intel_crtc *crtc, 8387 struct intel_crtc_state *pipe_config) 8388 { 8389 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 8390 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 8391 if (crtc->pipe != PIPE_A) 8392 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 8393 8394 /* DPLL not used with DSI, but still need the rest set up */ 8395 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 8396 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | 8397 DPLL_EXT_BUFFER_ENABLE_VLV; 8398 8399 pipe_config->dpll_hw_state.dpll_md = 8400 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 8401 } 8402 8403 static void chv_compute_dpll(struct intel_crtc *crtc, 8404 struct intel_crtc_state *pipe_config) 8405 { 8406 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | 8407 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 8408 if (crtc->pipe != PIPE_A) 8409 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 8410 8411 /* DPLL not used with DSI, but still need the rest set up */ 8412 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 8413 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; 8414 8415 pipe_config->dpll_hw_state.dpll_md = 8416 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 8417 } 8418 8419 static void vlv_prepare_pll(struct intel_crtc *crtc, 8420 const struct intel_crtc_state *pipe_config) 8421 { 8422 struct drm_device *dev = crtc->base.dev; 8423 struct drm_i915_private *dev_priv = to_i915(dev); 8424 enum pipe pipe = crtc->pipe; 8425 u32 mdiv; 8426 u32 bestn, bestm1, bestm2, bestp1, bestp2; 8427 u32 coreclk, reg_val; 8428 8429 /* Enable Refclk */ 8430 intel_de_write(dev_priv, DPLL(pipe), 8431 pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); 8432 8433 /* No need to actually set up the DPLL with DSI */ 8434 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8435 
return; 8436 8437 vlv_dpio_get(dev_priv); 8438 8439 bestn = pipe_config->dpll.n; 8440 bestm1 = pipe_config->dpll.m1; 8441 bestm2 = pipe_config->dpll.m2; 8442 bestp1 = pipe_config->dpll.p1; 8443 bestp2 = pipe_config->dpll.p2; 8444 8445 /* See eDP HDMI DPIO driver vbios notes doc */ 8446 8447 /* PLL B needs special handling */ 8448 if (pipe == PIPE_B) 8449 vlv_pllb_recal_opamp(dev_priv, pipe); 8450 8451 /* Set up Tx target for periodic Rcomp update */ 8452 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 8453 8454 /* Disable target IRef on PLL */ 8455 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 8456 reg_val &= 0x00ffffff; 8457 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 8458 8459 /* Disable fast lock */ 8460 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 8461 8462 /* Set idtafcrecal before PLL is enabled */ 8463 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 8464 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 8465 mdiv |= ((bestn << DPIO_N_SHIFT)); 8466 mdiv |= (1 << DPIO_K_SHIFT); 8467 8468 /* 8469 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 8470 * but we don't support that). 8471 * Note: don't use the DAC post divider as it seems unstable. 8472 */ 8473 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 8474 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 8475 8476 mdiv |= DPIO_ENABLE_CALIBRATION; 8477 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 8478 8479 /* Set HBR and RBR LPF coefficients */ 8480 if (pipe_config->port_clock == 162000 || 8481 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) || 8482 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) 8483 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 8484 0x009f0003); 8485 else 8486 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 8487 0x00d0000f); 8488 8489 if (intel_crtc_has_dp_encoder(pipe_config)) { 8490 /* Use SSC source */ 8491 if (pipe == PIPE_A) 8492 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8493 0x0df40000); 8494 else 8495 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8496 0x0df70000); 8497 } else { /* HDMI or VGA */ 8498 /* Use bend source */ 8499 if (pipe == PIPE_A) 8500 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8501 0x0df70000); 8502 else 8503 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8504 0x0df40000); 8505 } 8506 8507 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 8508 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 8509 if (intel_crtc_has_dp_encoder(pipe_config)) 8510 coreclk |= 0x01000000; 8511 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 8512 8513 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 8514 8515 vlv_dpio_put(dev_priv); 8516 } 8517 8518 static void chv_prepare_pll(struct intel_crtc *crtc, 8519 const struct intel_crtc_state *pipe_config) 8520 { 8521 struct drm_device *dev = crtc->base.dev; 8522 struct drm_i915_private *dev_priv = to_i915(dev); 8523 enum pipe pipe = crtc->pipe; 8524 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8525 u32 loopfilter, tribuf_calcntr; 8526 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 8527 u32 dpio_val; 8528 int vco; 8529 8530 /* Enable Refclk and SSC */ 8531 intel_de_write(dev_priv, DPLL(pipe), 8532 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 8533 8534 /* No need to actually set up the DPLL with DSI */ 8535 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8536 return; 8537 8538 bestn = pipe_config->dpll.n; 8539 bestm2_frac = 
pipe_config->dpll.m2 & 0x3fffff; 8540 bestm1 = pipe_config->dpll.m1; 8541 bestm2 = pipe_config->dpll.m2 >> 22; 8542 bestp1 = pipe_config->dpll.p1; 8543 bestp2 = pipe_config->dpll.p2; 8544 vco = pipe_config->dpll.vco; 8545 dpio_val = 0; 8546 loopfilter = 0; 8547 8548 vlv_dpio_get(dev_priv); 8549 8550 /* p1 and p2 divider */ 8551 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 8552 5 << DPIO_CHV_S1_DIV_SHIFT | 8553 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 8554 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 8555 1 << DPIO_CHV_K_DIV_SHIFT); 8556 8557 /* Feedback post-divider - m2 */ 8558 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 8559 8560 /* Feedback refclk divider - n and m1 */ 8561 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 8562 DPIO_CHV_M1_DIV_BY_2 | 8563 1 << DPIO_CHV_N_DIV_SHIFT); 8564 8565 /* M2 fraction division */ 8566 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 8567 8568 /* M2 fraction division enable */ 8569 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 8570 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 8571 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 8572 if (bestm2_frac) 8573 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 8574 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 8575 8576 /* Program digital lock detect threshold */ 8577 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 8578 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 8579 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 8580 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 8581 if (!bestm2_frac) 8582 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 8583 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 8584 8585 /* Loop filter */ 8586 if (vco == 5400000) { 8587 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); 8588 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); 8589 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); 8590 tribuf_calcntr = 0x9; 8591 } else if (vco <= 6200000) { 8592 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 8593 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); 8594 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 8595 tribuf_calcntr = 0x9; 8596 } else if (vco <= 6480000) { 8597 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 8598 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 8599 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 8600 tribuf_calcntr = 0x8; 8601 } else { 8602 /* Not supported. Apply the same limits as in the max case */ 8603 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 8604 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 8605 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 8606 tribuf_calcntr = 0; 8607 } 8608 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 8609 8610 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 8611 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 8612 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 8613 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 8614 8615 /* AFC Recal */ 8616 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 8617 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 8618 DPIO_AFC_RECAL); 8619 8620 vlv_dpio_put(dev_priv); 8621 } 8622 8623 /** 8624 * vlv_force_pll_on - forcibly enable just the PLL 8625 * @dev_priv: i915 private structure 8626 * @pipe: pipe PLL to enable 8627 * @dpll: PLL configuration 8628 * 8629 * Enable the PLL for @pipe using the supplied @dpll config. To be used 8630 * in cases where we need the PLL enabled even when @pipe is not going to 8631 * be enabled. 
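 *
 * A minimal usage sketch (the divider values here are purely illustrative,
 * not a validated configuration):
 *
 *	static const struct dpll clk = {
 *		.n = 1, .m1 = 2, .m2 = 118, .p1 = 3, .p2 = 2,
 *	};
 *
 *	if (vlv_force_pll_on(dev_priv, PIPE_A, &clk) == 0) {
 *		... poke the hardware that needed a running PLL ...
 *		vlv_force_pll_off(dev_priv, PIPE_A);
 *	}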
8632 */ 8633 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, 8634 const struct dpll *dpll) 8635 { 8636 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 8637 struct intel_crtc_state *pipe_config; 8638 8639 pipe_config = intel_crtc_state_alloc(crtc); 8640 if (!pipe_config) 8641 return -ENOMEM; 8642 8643 pipe_config->cpu_transcoder = (enum transcoder)pipe; 8644 pipe_config->pixel_multiplier = 1; 8645 pipe_config->dpll = *dpll; 8646 8647 if (IS_CHERRYVIEW(dev_priv)) { 8648 chv_compute_dpll(crtc, pipe_config); 8649 chv_prepare_pll(crtc, pipe_config); 8650 chv_enable_pll(crtc, pipe_config); 8651 } else { 8652 vlv_compute_dpll(crtc, pipe_config); 8653 vlv_prepare_pll(crtc, pipe_config); 8654 vlv_enable_pll(crtc, pipe_config); 8655 } 8656 8657 kfree(pipe_config); 8658 8659 return 0; 8660 } 8661 8662 /** 8663 * vlv_force_pll_off - forcibly disable just the PLL 8664 * @dev_priv: i915 private structure 8665 * @pipe: pipe PLL to disable 8666 * 8667 * Disable the PLL for @pipe. To be used in cases where we need 8668 * the PLL enabled even when @pipe is not going to be enabled. 8669 */ 8670 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) 8671 { 8672 if (IS_CHERRYVIEW(dev_priv)) 8673 chv_disable_pll(dev_priv, pipe); 8674 else 8675 vlv_disable_pll(dev_priv, pipe); 8676 } 8677 8678 static void i9xx_compute_dpll(struct intel_crtc *crtc, 8679 struct intel_crtc_state *crtc_state, 8680 struct dpll *reduced_clock) 8681 { 8682 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8683 u32 dpll; 8684 struct dpll *clock = &crtc_state->dpll; 8685 8686 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8687 8688 dpll = DPLL_VGA_MODE_DIS; 8689 8690 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 8691 dpll |= DPLLB_MODE_LVDS; 8692 else 8693 dpll |= DPLLB_MODE_DAC_SERIAL; 8694 8695 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 8696 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 8697 dpll |= (crtc_state->pixel_multiplier - 1) 8698 << SDVO_MULTIPLIER_SHIFT_HIRES; 8699 } 8700 8701 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 8702 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 8703 dpll |= DPLL_SDVO_HIGH_SPEED; 8704 8705 if (intel_crtc_has_dp_encoder(crtc_state)) 8706 dpll |= DPLL_SDVO_HIGH_SPEED; 8707 8708 /* compute bitmask from p1 value */ 8709 if (IS_PINEVIEW(dev_priv)) 8710 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 8711 else { 8712 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8713 if (IS_G4X(dev_priv) && reduced_clock) 8714 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 8715 } 8716 switch (clock->p2) { 8717 case 5: 8718 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 8719 break; 8720 case 7: 8721 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 8722 break; 8723 case 10: 8724 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 8725 break; 8726 case 14: 8727 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 8728 break; 8729 } 8730 if (INTEL_GEN(dev_priv) >= 4) 8731 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 8732 8733 if (crtc_state->sdvo_tv_clock) 8734 dpll |= PLL_REF_INPUT_TVCLKINBC; 8735 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8736 intel_panel_use_ssc(dev_priv)) 8737 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8738 else 8739 dpll |= PLL_REF_INPUT_DREFCLK; 8740 8741 dpll |= DPLL_VCO_ENABLE; 8742 crtc_state->dpll_hw_state.dpll = dpll; 8743 8744 if (INTEL_GEN(dev_priv) >= 4) { 8745 u32 dpll_md = (crtc_state->pixel_multiplier - 1) 8746 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 
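/* the field holds multiplier - 1, e.g. a 2x SDVO pixel multiplier is written as 1 */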
8747 crtc_state->dpll_hw_state.dpll_md = dpll_md; 8748 } 8749 } 8750 8751 static void i8xx_compute_dpll(struct intel_crtc *crtc, 8752 struct intel_crtc_state *crtc_state, 8753 struct dpll *reduced_clock) 8754 { 8755 struct drm_device *dev = crtc->base.dev; 8756 struct drm_i915_private *dev_priv = to_i915(dev); 8757 u32 dpll; 8758 struct dpll *clock = &crtc_state->dpll; 8759 8760 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8761 8762 dpll = DPLL_VGA_MODE_DIS; 8763 8764 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8765 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8766 } else { 8767 if (clock->p1 == 2) 8768 dpll |= PLL_P1_DIVIDE_BY_TWO; 8769 else 8770 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8771 if (clock->p2 == 4) 8772 dpll |= PLL_P2_DIVIDE_BY_4; 8773 } 8774 8775 /* 8776 * Bspec: 8777 * "[Almador Errata]: For the correct operation of the muxed DVO pins 8778 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data, 8779 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock 8780 * Enable) must be set to “1” in both the DPLL A Control Register 8781 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)." 8782 * 8783 * For simplicity we simply keep both bits always enabled in 8784 * both DPLLs. The spec says we should disable the DVO 2X clock 8785 * when not needed, but this seems to work fine in practice. 8786 */ 8787 if (IS_I830(dev_priv) || 8788 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) 8789 dpll |= DPLL_DVO_2X_MODE; 8790 8791 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8792 intel_panel_use_ssc(dev_priv)) 8793 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8794 else 8795 dpll |= PLL_REF_INPUT_DREFCLK; 8796 8797 dpll |= DPLL_VCO_ENABLE; 8798 crtc_state->dpll_hw_state.dpll = dpll; 8799 } 8800 8801 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) 8802 { 8803 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8804 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8805 enum pipe pipe = crtc->pipe; 8806 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 8807 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 8808 u32 crtc_vtotal, crtc_vblank_end; 8809 int vsyncshift = 0; 8810 8811 /* We need to be careful not to change the adjusted mode, for otherwise 8812 * the hw state checker will get angry at the mismatch.
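 * That is why the interlace adjustments below are applied to the local
 * copies (crtc_vtotal / crtc_vblank_end) instead of adjusted_mode itself.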
*/ 8813 crtc_vtotal = adjusted_mode->crtc_vtotal; 8814 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 8815 8816 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 8817 /* the chip adds 2 halflines automatically */ 8818 crtc_vtotal -= 1; 8819 crtc_vblank_end -= 1; 8820 8821 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 8822 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 8823 else 8824 vsyncshift = adjusted_mode->crtc_hsync_start - 8825 adjusted_mode->crtc_htotal / 2; 8826 if (vsyncshift < 0) 8827 vsyncshift += adjusted_mode->crtc_htotal; 8828 } 8829 8830 if (INTEL_GEN(dev_priv) > 3) 8831 intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder), 8832 vsyncshift); 8833 8834 intel_de_write(dev_priv, HTOTAL(cpu_transcoder), 8835 (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); 8836 intel_de_write(dev_priv, HBLANK(cpu_transcoder), 8837 (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); 8838 intel_de_write(dev_priv, HSYNC(cpu_transcoder), 8839 (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); 8840 8841 intel_de_write(dev_priv, VTOTAL(cpu_transcoder), 8842 (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16)); 8843 intel_de_write(dev_priv, VBLANK(cpu_transcoder), 8844 (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16)); 8845 intel_de_write(dev_priv, VSYNC(cpu_transcoder), 8846 (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); 8847 8848 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 8849 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 8850 * documented on the DDI_FUNC_CTL register description, EDP Input Select 8851 * bits. */ 8852 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 8853 (pipe == PIPE_B || pipe == PIPE_C)) 8854 intel_de_write(dev_priv, VTOTAL(pipe), 8855 intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); 8856 8857 } 8858 8859 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 8860 { 8861 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8862 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8863 enum pipe pipe = crtc->pipe; 8864 8865 /* pipesrc controls the size that is scaled from, which should 8866 * always be the user's requested size. 
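 * The register packs (width - 1) into the high word and (height - 1) into
 * the low word, e.g. a 1920x1080 source is written as 0x077f0437.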
8867 */ 8868 intel_de_write(dev_priv, PIPESRC(pipe), 8869 ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1)); 8870 } 8871 8872 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) 8873 { 8874 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 8875 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 8876 8877 if (IS_GEN(dev_priv, 2)) 8878 return false; 8879 8880 if (INTEL_GEN(dev_priv) >= 9 || 8881 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 8882 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; 8883 else 8884 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; 8885 } 8886 8887 static void intel_get_pipe_timings(struct intel_crtc *crtc, 8888 struct intel_crtc_state *pipe_config) 8889 { 8890 struct drm_device *dev = crtc->base.dev; 8891 struct drm_i915_private *dev_priv = to_i915(dev); 8892 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 8893 u32 tmp; 8894 8895 tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder)); 8896 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 8897 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 8898 8899 if (!transcoder_is_dsi(cpu_transcoder)) { 8900 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder)); 8901 pipe_config->hw.adjusted_mode.crtc_hblank_start = 8902 (tmp & 0xffff) + 1; 8903 pipe_config->hw.adjusted_mode.crtc_hblank_end = 8904 ((tmp >> 16) & 0xffff) + 1; 8905 } 8906 tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder)); 8907 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 8908 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 8909 8910 tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder)); 8911 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 8912 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 8913 8914 if (!transcoder_is_dsi(cpu_transcoder)) { 8915 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder)); 8916 pipe_config->hw.adjusted_mode.crtc_vblank_start = 8917 (tmp & 0xffff) + 1; 8918 pipe_config->hw.adjusted_mode.crtc_vblank_end = 8919 ((tmp >> 16) & 0xffff) + 1; 8920 } 8921 tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder)); 8922 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 8923 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 8924 8925 if (intel_pipe_is_interlaced(pipe_config)) { 8926 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 8927 pipe_config->hw.adjusted_mode.crtc_vtotal += 1; 8928 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1; 8929 } 8930 } 8931 8932 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 8933 struct intel_crtc_state *pipe_config) 8934 { 8935 struct drm_device *dev = crtc->base.dev; 8936 struct drm_i915_private *dev_priv = to_i915(dev); 8937 u32 tmp; 8938 8939 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe)); 8940 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 8941 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 8942 8943 pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h; 8944 pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w; 8945 } 8946 8947 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 8948 struct intel_crtc_state *pipe_config) 8949 { 8950 mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay; 8951 mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal; 8952 mode->hsync_start = 
pipe_config->hw.adjusted_mode.crtc_hsync_start; 8953 mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end; 8954 8955 mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay; 8956 mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal; 8957 mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start; 8958 mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end; 8959 8960 mode->flags = pipe_config->hw.adjusted_mode.flags; 8961 mode->type = DRM_MODE_TYPE_DRIVER; 8962 8963 mode->clock = pipe_config->hw.adjusted_mode.crtc_clock; 8964 8965 drm_mode_set_name(mode); 8966 } 8967 8968 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 8969 { 8970 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8971 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8972 u32 pipeconf; 8973 8974 pipeconf = 0; 8975 8976 /* we keep both pipes enabled on 830 */ 8977 if (IS_I830(dev_priv)) 8978 pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE; 8979 8980 if (crtc_state->double_wide) 8981 pipeconf |= PIPECONF_DOUBLE_WIDE; 8982 8983 /* only g4x and later have fancy bpc/dither controls */ 8984 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 8985 IS_CHERRYVIEW(dev_priv)) { 8986 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 8987 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 8988 pipeconf |= PIPECONF_DITHER_EN | 8989 PIPECONF_DITHER_TYPE_SP; 8990 8991 switch (crtc_state->pipe_bpp) { 8992 case 18: 8993 pipeconf |= PIPECONF_6BPC; 8994 break; 8995 case 24: 8996 pipeconf |= PIPECONF_8BPC; 8997 break; 8998 case 30: 8999 pipeconf |= PIPECONF_10BPC; 9000 break; 9001 default: 9002 /* Case prevented by intel_choose_pipe_bpp_dither. */ 9003 BUG(); 9004 } 9005 } 9006 9007 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 9008 if (INTEL_GEN(dev_priv) < 4 || 9009 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 9010 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 9011 else 9012 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 9013 } else { 9014 pipeconf |= PIPECONF_PROGRESSIVE; 9015 } 9016 9017 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 9018 crtc_state->limited_color_range) 9019 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 9020 9021 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 9022 9023 pipeconf |= PIPECONF_FRAME_START_DELAY(0); 9024 9025 intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf); 9026 intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe)); 9027 } 9028 9029 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 9030 struct intel_crtc_state *crtc_state) 9031 { 9032 struct drm_device *dev = crtc->base.dev; 9033 struct drm_i915_private *dev_priv = to_i915(dev); 9034 const struct intel_limit *limit; 9035 int refclk = 48000; 9036 9037 memset(&crtc_state->dpll_hw_state, 0, 9038 sizeof(crtc_state->dpll_hw_state)); 9039 9040 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9041 if (intel_panel_use_ssc(dev_priv)) { 9042 refclk = dev_priv->vbt.lvds_ssc_freq; 9043 drm_dbg_kms(&dev_priv->drm, 9044 "using SSC reference clock of %d kHz\n", 9045 refclk); 9046 } 9047 9048 limit = &intel_limits_i8xx_lvds; 9049 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { 9050 limit = &intel_limits_i8xx_dvo; 9051 } else { 9052 limit = &intel_limits_i8xx_dac; 9053 } 9054 9055 if (!crtc_state->clock_set && 9056 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9057 refclk, NULL, &crtc_state->dpll)) { 9058 drm_err(&dev_priv->drm, 9059 
"Couldn't find PLL settings for mode!\n"); 9060 return -EINVAL; 9061 } 9062 9063 i8xx_compute_dpll(crtc, crtc_state, NULL); 9064 9065 return 0; 9066 } 9067 9068 static int g4x_crtc_compute_clock(struct intel_crtc *crtc, 9069 struct intel_crtc_state *crtc_state) 9070 { 9071 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9072 const struct intel_limit *limit; 9073 int refclk = 96000; 9074 9075 memset(&crtc_state->dpll_hw_state, 0, 9076 sizeof(crtc_state->dpll_hw_state)); 9077 9078 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9079 if (intel_panel_use_ssc(dev_priv)) { 9080 refclk = dev_priv->vbt.lvds_ssc_freq; 9081 drm_dbg_kms(&dev_priv->drm, 9082 "using SSC reference clock of %d kHz\n", 9083 refclk); 9084 } 9085 9086 if (intel_is_dual_link_lvds(dev_priv)) 9087 limit = &intel_limits_g4x_dual_channel_lvds; 9088 else 9089 limit = &intel_limits_g4x_single_channel_lvds; 9090 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || 9091 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 9092 limit = &intel_limits_g4x_hdmi; 9093 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { 9094 limit = &intel_limits_g4x_sdvo; 9095 } else { 9096 /* The option is for other outputs */ 9097 limit = &intel_limits_i9xx_sdvo; 9098 } 9099 9100 if (!crtc_state->clock_set && 9101 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9102 refclk, NULL, &crtc_state->dpll)) { 9103 drm_err(&dev_priv->drm, 9104 "Couldn't find PLL settings for mode!\n"); 9105 return -EINVAL; 9106 } 9107 9108 i9xx_compute_dpll(crtc, crtc_state, NULL); 9109 9110 return 0; 9111 } 9112 9113 static int pnv_crtc_compute_clock(struct intel_crtc *crtc, 9114 struct intel_crtc_state *crtc_state) 9115 { 9116 struct drm_device *dev = crtc->base.dev; 9117 struct drm_i915_private *dev_priv = to_i915(dev); 9118 const struct intel_limit *limit; 9119 int refclk = 96000; 9120 9121 memset(&crtc_state->dpll_hw_state, 0, 9122 sizeof(crtc_state->dpll_hw_state)); 9123 9124 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9125 if (intel_panel_use_ssc(dev_priv)) { 9126 refclk = dev_priv->vbt.lvds_ssc_freq; 9127 drm_dbg_kms(&dev_priv->drm, 9128 "using SSC reference clock of %d kHz\n", 9129 refclk); 9130 } 9131 9132 limit = &pnv_limits_lvds; 9133 } else { 9134 limit = &pnv_limits_sdvo; 9135 } 9136 9137 if (!crtc_state->clock_set && 9138 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9139 refclk, NULL, &crtc_state->dpll)) { 9140 drm_err(&dev_priv->drm, 9141 "Couldn't find PLL settings for mode!\n"); 9142 return -EINVAL; 9143 } 9144 9145 i9xx_compute_dpll(crtc, crtc_state, NULL); 9146 9147 return 0; 9148 } 9149 9150 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, 9151 struct intel_crtc_state *crtc_state) 9152 { 9153 struct drm_device *dev = crtc->base.dev; 9154 struct drm_i915_private *dev_priv = to_i915(dev); 9155 const struct intel_limit *limit; 9156 int refclk = 96000; 9157 9158 memset(&crtc_state->dpll_hw_state, 0, 9159 sizeof(crtc_state->dpll_hw_state)); 9160 9161 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9162 if (intel_panel_use_ssc(dev_priv)) { 9163 refclk = dev_priv->vbt.lvds_ssc_freq; 9164 drm_dbg_kms(&dev_priv->drm, 9165 "using SSC reference clock of %d kHz\n", 9166 refclk); 9167 } 9168 9169 limit = &intel_limits_i9xx_lvds; 9170 } else { 9171 limit = &intel_limits_i9xx_sdvo; 9172 } 9173 9174 if (!crtc_state->clock_set && 9175 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9176 refclk, NULL, &crtc_state->dpll)) { 9177 
drm_err(&dev_priv->drm, 9178 "Couldn't find PLL settings for mode!\n"); 9179 return -EINVAL; 9180 } 9181 9182 i9xx_compute_dpll(crtc, crtc_state, NULL); 9183 9184 return 0; 9185 } 9186 9187 static int chv_crtc_compute_clock(struct intel_crtc *crtc, 9188 struct intel_crtc_state *crtc_state) 9189 { 9190 int refclk = 100000; 9191 const struct intel_limit *limit = &intel_limits_chv; 9192 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 9193 9194 memset(&crtc_state->dpll_hw_state, 0, 9195 sizeof(crtc_state->dpll_hw_state)); 9196 9197 if (!crtc_state->clock_set && 9198 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9199 refclk, NULL, &crtc_state->dpll)) { 9200 drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n"); 9201 return -EINVAL; 9202 } 9203 9204 chv_compute_dpll(crtc, crtc_state); 9205 9206 return 0; 9207 } 9208 9209 static int vlv_crtc_compute_clock(struct intel_crtc *crtc, 9210 struct intel_crtc_state *crtc_state) 9211 { 9212 int refclk = 100000; 9213 const struct intel_limit *limit = &intel_limits_vlv; 9214 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 9215 9216 memset(&crtc_state->dpll_hw_state, 0, 9217 sizeof(crtc_state->dpll_hw_state)); 9218 9219 if (!crtc_state->clock_set && 9220 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9221 refclk, NULL, &crtc_state->dpll)) { 9222 drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n"); 9223 return -EINVAL; 9224 } 9225 9226 vlv_compute_dpll(crtc, crtc_state); 9227 9228 return 0; 9229 } 9230 9231 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 9232 { 9233 if (IS_I830(dev_priv)) 9234 return false; 9235 9236 return INTEL_GEN(dev_priv) >= 4 || 9237 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 9238 } 9239 9240 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) 9241 { 9242 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 9243 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9244 u32 tmp; 9245 9246 if (!i9xx_has_pfit(dev_priv)) 9247 return; 9248 9249 tmp = intel_de_read(dev_priv, PFIT_CONTROL); 9250 if (!(tmp & PFIT_ENABLE)) 9251 return; 9252 9253 /* Check whether the pfit is attached to our pipe. 
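* Before gen4 the fitter only serves pipe B, so a plain pipe check
* suffices; gen4+ has a pipe select field in PFIT_CONTROL that must
* match our pipe.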
*/ 9254 if (INTEL_GEN(dev_priv) < 4) { 9255 if (crtc->pipe != PIPE_B) 9256 return; 9257 } else { 9258 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 9259 return; 9260 } 9261 9262 crtc_state->gmch_pfit.control = tmp; 9263 crtc_state->gmch_pfit.pgm_ratios = 9264 intel_de_read(dev_priv, PFIT_PGM_RATIOS); 9265 } 9266 9267 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 9268 struct intel_crtc_state *pipe_config) 9269 { 9270 struct drm_device *dev = crtc->base.dev; 9271 struct drm_i915_private *dev_priv = to_i915(dev); 9272 enum pipe pipe = crtc->pipe; 9273 struct dpll clock; 9274 u32 mdiv; 9275 int refclk = 100000; 9276 9277 /* In case of DSI, DPLL will not be used */ 9278 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 9279 return; 9280 9281 vlv_dpio_get(dev_priv); 9282 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 9283 vlv_dpio_put(dev_priv); 9284 9285 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 9286 clock.m2 = mdiv & DPIO_M2DIV_MASK; 9287 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 9288 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 9289 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 9290 9291 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 9292 } 9293 9294 static void 9295 i9xx_get_initial_plane_config(struct intel_crtc *crtc, 9296 struct intel_initial_plane_config *plane_config) 9297 { 9298 struct drm_device *dev = crtc->base.dev; 9299 struct drm_i915_private *dev_priv = to_i915(dev); 9300 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 9301 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 9302 enum pipe pipe; 9303 u32 val, base, offset; 9304 int fourcc, pixel_format; 9305 unsigned int aligned_height; 9306 struct drm_framebuffer *fb; 9307 struct intel_framebuffer *intel_fb; 9308 9309 if (!plane->get_hw_state(plane, &pipe)) 9310 return; 9311 9312 drm_WARN_ON(dev, pipe != crtc->pipe); 9313 9314 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 9315 if (!intel_fb) { 9316 drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n"); 9317 return; 9318 } 9319 9320 fb = &intel_fb->base; 9321 9322 fb->dev = dev; 9323 9324 val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); 9325 9326 if (INTEL_GEN(dev_priv) >= 4) { 9327 if (val & DISPPLANE_TILED) { 9328 plane_config->tiling = I915_TILING_X; 9329 fb->modifier = I915_FORMAT_MOD_X_TILED; 9330 } 9331 9332 if (val & DISPPLANE_ROTATE_180) 9333 plane_config->rotation = DRM_MODE_ROTATE_180; 9334 } 9335 9336 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && 9337 val & DISPPLANE_MIRROR) 9338 plane_config->rotation |= DRM_MODE_REFLECT_X; 9339 9340 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 9341 fourcc = i9xx_format_to_fourcc(pixel_format); 9342 fb->format = drm_format_info(fourcc); 9343 9344 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 9345 offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane)); 9346 base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; 9347 } else if (INTEL_GEN(dev_priv) >= 4) { 9348 if (plane_config->tiling) 9349 offset = intel_de_read(dev_priv, 9350 DSPTILEOFF(i9xx_plane)); 9351 else 9352 offset = intel_de_read(dev_priv, 9353 DSPLINOFF(i9xx_plane)); 9354 base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; 9355 } else { 9356 base = intel_de_read(dev_priv, DSPADDR(i9xx_plane)); 9357 } 9358 plane_config->base = base; 9359 9360 val = intel_de_read(dev_priv, PIPESRC(pipe)); 9361 fb->width = ((val >> 16) & 0xfff) + 1; 9362 fb->height = ((val >> 0) & 0xfff) + 1; 9363 9364 val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane)); 9365 fb->pitches[0] = val 
& 0xffffffc0; 9366 9367 aligned_height = intel_fb_align_height(fb, 0, fb->height); 9368 9369 plane_config->size = fb->pitches[0] * aligned_height; 9370 9371 drm_dbg_kms(&dev_priv->drm, 9372 "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 9373 crtc->base.name, plane->base.name, fb->width, fb->height, 9374 fb->format->cpp[0] * 8, base, fb->pitches[0], 9375 plane_config->size); 9376 9377 plane_config->fb = intel_fb; 9378 } 9379 9380 static void chv_crtc_clock_get(struct intel_crtc *crtc, 9381 struct intel_crtc_state *pipe_config) 9382 { 9383 struct drm_device *dev = crtc->base.dev; 9384 struct drm_i915_private *dev_priv = to_i915(dev); 9385 enum pipe pipe = crtc->pipe; 9386 enum dpio_channel port = vlv_pipe_to_channel(pipe); 9387 struct dpll clock; 9388 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 9389 int refclk = 100000; 9390 9391 /* In case of DSI, DPLL will not be used */ 9392 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 9393 return; 9394 9395 vlv_dpio_get(dev_priv); 9396 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 9397 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 9398 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 9399 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 9400 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 9401 vlv_dpio_put(dev_priv); 9402 9403 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; 9404 clock.m2 = (pll_dw0 & 0xff) << 22; 9405 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 9406 clock.m2 |= pll_dw2 & 0x3fffff; 9407 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 9408 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 9409 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 9410 9411 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 9412 } 9413 9414 static enum intel_output_format 9415 bdw_get_pipemisc_output_format(struct intel_crtc *crtc) 9416 { 9417 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9418 u32 tmp; 9419 9420 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); 9421 9422 if (tmp & PIPEMISC_YUV420_ENABLE) { 9423 /* We support 4:2:0 in full blend mode only */ 9424 drm_WARN_ON(&dev_priv->drm, 9425 (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0); 9426 9427 return INTEL_OUTPUT_FORMAT_YCBCR420; 9428 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { 9429 return INTEL_OUTPUT_FORMAT_YCBCR444; 9430 } else { 9431 return INTEL_OUTPUT_FORMAT_RGB; 9432 } 9433 } 9434 9435 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) 9436 { 9437 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 9438 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 9439 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9440 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 9441 u32 tmp; 9442 9443 tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); 9444 9445 if (tmp & DISPPLANE_GAMMA_ENABLE) 9446 crtc_state->gamma_enable = true; 9447 9448 if (!HAS_GMCH(dev_priv) && 9449 tmp & DISPPLANE_PIPE_CSC_ENABLE) 9450 crtc_state->csc_enable = true; 9451 } 9452 9453 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 9454 struct intel_crtc_state *pipe_config) 9455 { 9456 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9457 enum intel_display_power_domain power_domain; 9458 intel_wakeref_t wakeref; 9459 u32 tmp; 9460 bool ret; 9461 9462 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9463 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 9464 if (!wakeref) 9465 return 
false; 9466 9467 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 9468 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9469 pipe_config->shared_dpll = NULL; 9470 9471 ret = false; 9472 9473 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); 9474 if (!(tmp & PIPECONF_ENABLE)) 9475 goto out; 9476 9477 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 9478 IS_CHERRYVIEW(dev_priv)) { 9479 switch (tmp & PIPECONF_BPC_MASK) { 9480 case PIPECONF_6BPC: 9481 pipe_config->pipe_bpp = 18; 9482 break; 9483 case PIPECONF_8BPC: 9484 pipe_config->pipe_bpp = 24; 9485 break; 9486 case PIPECONF_10BPC: 9487 pipe_config->pipe_bpp = 30; 9488 break; 9489 default: 9490 break; 9491 } 9492 } 9493 9494 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 9495 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 9496 pipe_config->limited_color_range = true; 9497 9498 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >> 9499 PIPECONF_GAMMA_MODE_SHIFT; 9500 9501 if (IS_CHERRYVIEW(dev_priv)) 9502 pipe_config->cgm_mode = intel_de_read(dev_priv, 9503 CGM_PIPE_MODE(crtc->pipe)); 9504 9505 i9xx_get_pipe_color_config(pipe_config); 9506 intel_color_get_config(pipe_config); 9507 9508 if (INTEL_GEN(dev_priv) < 4) 9509 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 9510 9511 intel_get_pipe_timings(crtc, pipe_config); 9512 intel_get_pipe_src_size(crtc, pipe_config); 9513 9514 i9xx_get_pfit_config(pipe_config); 9515 9516 if (INTEL_GEN(dev_priv) >= 4) { 9517 /* No way to read it out on pipes B and C */ 9518 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 9519 tmp = dev_priv->chv_dpll_md[crtc->pipe]; 9520 else 9521 tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe)); 9522 pipe_config->pixel_multiplier = 9523 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 9524 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 9525 pipe_config->dpll_hw_state.dpll_md = tmp; 9526 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 9527 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 9528 tmp = intel_de_read(dev_priv, DPLL(crtc->pipe)); 9529 pipe_config->pixel_multiplier = 9530 ((tmp & SDVO_MULTIPLIER_MASK) 9531 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 9532 } else { 9533 /* Note that on i915G/GM the pixel multiplier is in the sdvo 9534 * port and will be fixed up in the encoder->get_config 9535 * function. */ 9536 pipe_config->pixel_multiplier = 1; 9537 } 9538 pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv, 9539 DPLL(crtc->pipe)); 9540 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 9541 pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv, 9542 FP0(crtc->pipe)); 9543 pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv, 9544 FP1(crtc->pipe)); 9545 } else { 9546 /* Mask out read-only status bits. */ 9547 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 9548 DPLL_PORTC_READY_MASK | 9549 DPLL_PORTB_READY_MASK); 9550 } 9551 9552 if (IS_CHERRYVIEW(dev_priv)) 9553 chv_crtc_clock_get(crtc, pipe_config); 9554 else if (IS_VALLEYVIEW(dev_priv)) 9555 vlv_crtc_clock_get(crtc, pipe_config); 9556 else 9557 i9xx_crtc_clock_get(crtc, pipe_config); 9558 9559 /* 9560 * Normally the dotclock is filled in by the encoder .get_config() 9561 * but in case the pipe is enabled w/o any ports we need a sane 9562 * default. 
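* That default is simply dotclock = port_clock / pixel_multiplier,
* as the assignment below computes.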
9563 */ 9564 pipe_config->hw.adjusted_mode.crtc_clock = 9565 pipe_config->port_clock / pipe_config->pixel_multiplier; 9566 9567 ret = true; 9568 9569 out: 9570 intel_display_power_put(dev_priv, power_domain, wakeref); 9571 9572 return ret; 9573 } 9574 9575 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) 9576 { 9577 struct intel_encoder *encoder; 9578 int i; 9579 u32 val, final; 9580 bool has_lvds = false; 9581 bool has_cpu_edp = false; 9582 bool has_panel = false; 9583 bool has_ck505 = false; 9584 bool can_ssc = false; 9585 bool using_ssc_source = false; 9586 9587 /* We need to take the global config into account */ 9588 for_each_intel_encoder(&dev_priv->drm, encoder) { 9589 switch (encoder->type) { 9590 case INTEL_OUTPUT_LVDS: 9591 has_panel = true; 9592 has_lvds = true; 9593 break; 9594 case INTEL_OUTPUT_EDP: 9595 has_panel = true; 9596 if (encoder->port == PORT_A) 9597 has_cpu_edp = true; 9598 break; 9599 default: 9600 break; 9601 } 9602 } 9603 9604 if (HAS_PCH_IBX(dev_priv)) { 9605 has_ck505 = dev_priv->vbt.display_clock_mode; 9606 can_ssc = has_ck505; 9607 } else { 9608 has_ck505 = false; 9609 can_ssc = true; 9610 } 9611 9612 /* Check if any DPLLs are using the SSC source */ 9613 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { 9614 u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); 9615 9616 if (!(temp & DPLL_VCO_ENABLE)) 9617 continue; 9618 9619 if ((temp & PLL_REF_INPUT_MASK) == 9620 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 9621 using_ssc_source = true; 9622 break; 9623 } 9624 } 9625 9626 drm_dbg_kms(&dev_priv->drm, 9627 "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 9628 has_panel, has_lvds, has_ck505, using_ssc_source); 9629 9630 /* Ironlake: try to set up the display ref clock before DPLL 9631 * enabling. This is only under the driver's control after 9632 * PCH B stepping; previous chipset steppings should ignore 9633 * this setting. 9634 */ 9635 val = intel_de_read(dev_priv, PCH_DREF_CONTROL); 9636 9637 /* As we must carefully and slowly disable/enable each source in turn, 9638 * compute the final state we want first and check if we need to 9639 * make any changes at all. 
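* The sequence below then walks the hardware through each step, and
* the BUG_ON at the end of the function asserts that the register
* actually reached the precomputed value.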
9640 */ 9641 final = val; 9642 final &= ~DREF_NONSPREAD_SOURCE_MASK; 9643 if (has_ck505) 9644 final |= DREF_NONSPREAD_CK505_ENABLE; 9645 else 9646 final |= DREF_NONSPREAD_SOURCE_ENABLE; 9647 9648 final &= ~DREF_SSC_SOURCE_MASK; 9649 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9650 final &= ~DREF_SSC1_ENABLE; 9651 9652 if (has_panel) { 9653 final |= DREF_SSC_SOURCE_ENABLE; 9654 9655 if (intel_panel_use_ssc(dev_priv) && can_ssc) 9656 final |= DREF_SSC1_ENABLE; 9657 9658 if (has_cpu_edp) { 9659 if (intel_panel_use_ssc(dev_priv) && can_ssc) 9660 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 9661 else 9662 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 9663 } else 9664 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9665 } else if (using_ssc_source) { 9666 final |= DREF_SSC_SOURCE_ENABLE; 9667 final |= DREF_SSC1_ENABLE; 9668 } 9669 9670 if (final == val) 9671 return; 9672 9673 /* Always enable nonspread source */ 9674 val &= ~DREF_NONSPREAD_SOURCE_MASK; 9675 9676 if (has_ck505) 9677 val |= DREF_NONSPREAD_CK505_ENABLE; 9678 else 9679 val |= DREF_NONSPREAD_SOURCE_ENABLE; 9680 9681 if (has_panel) { 9682 val &= ~DREF_SSC_SOURCE_MASK; 9683 val |= DREF_SSC_SOURCE_ENABLE; 9684 9685 /* SSC must be turned on before enabling the CPU output */ 9686 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9687 drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); 9688 val |= DREF_SSC1_ENABLE; 9689 } else 9690 val &= ~DREF_SSC1_ENABLE; 9691 9692 /* Get SSC going before enabling the outputs */ 9693 intel_de_write(dev_priv, PCH_DREF_CONTROL, val); 9694 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); 9695 udelay(200); 9696 9697 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9698 9699 /* Enable CPU source on CPU attached eDP */ 9700 if (has_cpu_edp) { 9701 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9702 drm_dbg_kms(&dev_priv->drm, 9703 "Using SSC on eDP\n"); 9704 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 9705 } else 9706 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 9707 } else 9708 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9709 9710 intel_de_write(dev_priv, PCH_DREF_CONTROL, val); 9711 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); 9712 udelay(200); 9713 } else { 9714 drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n"); 9715 9716 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9717 9718 /* Turn off CPU output */ 9719 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9720 9721 intel_de_write(dev_priv, PCH_DREF_CONTROL, val); 9722 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); 9723 udelay(200); 9724 9725 if (!using_ssc_source) { 9726 drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n"); 9727 9728 /* Turn off the SSC source */ 9729 val &= ~DREF_SSC_SOURCE_MASK; 9730 val |= DREF_SSC_SOURCE_DISABLE; 9731 9732 /* Turn off SSC1 */ 9733 val &= ~DREF_SSC1_ENABLE; 9734 9735 intel_de_write(dev_priv, PCH_DREF_CONTROL, val); 9736 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); 9737 udelay(200); 9738 } 9739 } 9740 9741 BUG_ON(val != final); 9742 } 9743 9744 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 9745 { 9746 u32 tmp; 9747 9748 tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); 9749 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 9750 intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); 9751 9752 if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & 9753 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 9754 drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); 9755 9756 tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); 9757 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 9758 intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); 9759 9760 if (wait_for_us((intel_de_read(dev_priv, 
SOUTH_CHICKEN2) & 9761 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 9762 drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n"); 9763 } 9764 9765 /* WaMPhyProgramming:hsw */ 9766 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 9767 { 9768 u32 tmp; 9769 9770 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); 9771 tmp &= ~(0xFF << 24); 9772 tmp |= (0x12 << 24); 9773 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 9774 9775 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 9776 tmp |= (1 << 11); 9777 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 9778 9779 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 9780 tmp |= (1 << 11); 9781 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 9782 9783 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 9784 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9785 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 9786 9787 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 9788 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9789 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 9790 9791 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 9792 tmp &= ~(7 << 13); 9793 tmp |= (5 << 13); 9794 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 9795 9796 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 9797 tmp &= ~(7 << 13); 9798 tmp |= (5 << 13); 9799 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 9800 9801 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 9802 tmp &= ~0xFF; 9803 tmp |= 0x1C; 9804 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 9805 9806 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 9807 tmp &= ~0xFF; 9808 tmp |= 0x1C; 9809 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 9810 9811 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 9812 tmp &= ~(0xFF << 16); 9813 tmp |= (0x1C << 16); 9814 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 9815 9816 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 9817 tmp &= ~(0xFF << 16); 9818 tmp |= (0x1C << 16); 9819 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 9820 9821 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 9822 tmp |= (1 << 27); 9823 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 9824 9825 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 9826 tmp |= (1 << 27); 9827 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 9828 9829 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 9830 tmp &= ~(0xF << 28); 9831 tmp |= (4 << 28); 9832 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 9833 9834 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 9835 tmp &= ~(0xF << 28); 9836 tmp |= (4 << 28); 9837 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 9838 } 9839 9840 /* Implements 3 different sequences from BSpec chapter "Display iCLK 9841 * Programming" based on the parameters passed: 9842 * - Sequence to enable CLKOUT_DP 9843 * - Sequence to enable CLKOUT_DP without spread 9844 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 9845 */ 9846 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, 9847 bool with_spread, bool with_fdi) 9848 { 9849 u32 reg, tmp; 9850 9851 if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread, 9852 "FDI requires downspread\n")) 9853 with_spread = true; 9854 if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) && 9855 with_fdi, "LP PCH doesn't have FDI\n")) 9856 with_fdi = false; 9857 9858 mutex_lock(&dev_priv->sb_lock); 9859 9860 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9861 tmp &= ~SBI_SSCCTL_DISABLE; 9862 tmp |= SBI_SSCCTL_PATHALT; 9863 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9864 9865 
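/* Let the SSC source settle before the next SSCCTL update; the
 * 24 us figure presumably comes from the BSpec iCLK enable sequence. */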
udelay(24); 9866 9867 if (with_spread) { 9868 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9869 tmp &= ~SBI_SSCCTL_PATHALT; 9870 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9871 9872 if (with_fdi) { 9873 lpt_reset_fdi_mphy(dev_priv); 9874 lpt_program_fdi_mphy(dev_priv); 9875 } 9876 } 9877 9878 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 9879 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9880 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9881 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9882 9883 mutex_unlock(&dev_priv->sb_lock); 9884 } 9885 9886 /* Sequence to disable CLKOUT_DP */ 9887 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) 9888 { 9889 u32 reg, tmp; 9890 9891 mutex_lock(&dev_priv->sb_lock); 9892 9893 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 9894 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9895 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9896 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9897 9898 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9899 if (!(tmp & SBI_SSCCTL_DISABLE)) { 9900 if (!(tmp & SBI_SSCCTL_PATHALT)) { 9901 tmp |= SBI_SSCCTL_PATHALT; 9902 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9903 udelay(32); 9904 } 9905 tmp |= SBI_SSCCTL_DISABLE; 9906 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9907 } 9908 9909 mutex_unlock(&dev_priv->sb_lock); 9910 } 9911 9912 #define BEND_IDX(steps) ((50 + (steps)) / 5) 9913 9914 static const u16 sscdivintphase[] = { 9915 [BEND_IDX( 50)] = 0x3B23, 9916 [BEND_IDX( 45)] = 0x3B23, 9917 [BEND_IDX( 40)] = 0x3C23, 9918 [BEND_IDX( 35)] = 0x3C23, 9919 [BEND_IDX( 30)] = 0x3D23, 9920 [BEND_IDX( 25)] = 0x3D23, 9921 [BEND_IDX( 20)] = 0x3E23, 9922 [BEND_IDX( 15)] = 0x3E23, 9923 [BEND_IDX( 10)] = 0x3F23, 9924 [BEND_IDX( 5)] = 0x3F23, 9925 [BEND_IDX( 0)] = 0x0025, 9926 [BEND_IDX( -5)] = 0x0025, 9927 [BEND_IDX(-10)] = 0x0125, 9928 [BEND_IDX(-15)] = 0x0125, 9929 [BEND_IDX(-20)] = 0x0225, 9930 [BEND_IDX(-25)] = 0x0225, 9931 [BEND_IDX(-30)] = 0x0325, 9932 [BEND_IDX(-35)] = 0x0325, 9933 [BEND_IDX(-40)] = 0x0425, 9934 [BEND_IDX(-45)] = 0x0425, 9935 [BEND_IDX(-50)] = 0x0525, 9936 }; 9937 9938 /* 9939 * Bend CLKOUT_DP 9940 * steps -50 to 50 inclusive, in steps of 5 9941 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) 9942 * change in clock period = -(steps / 10) * 5.787 ps 9943 */ 9944 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) 9945 { 9946 u32 tmp; 9947 int idx = BEND_IDX(steps); 9948 9949 if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0)) 9950 return; 9951 9952 if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase))) 9953 return; 9954 9955 mutex_lock(&dev_priv->sb_lock); 9956 9957 if (steps % 10 != 0) 9958 tmp = 0xAAAAAAAB; 9959 else 9960 tmp = 0x00000000; 9961 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); 9962 9963 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); 9964 tmp &= 0xffff0000; 9965 tmp |= sscdivintphase[idx]; 9966 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); 9967 9968 mutex_unlock(&dev_priv->sb_lock); 9969 } 9970 9971 #undef BEND_IDX 9972 9973 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) 9974 { 9975 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); 9976 u32 ctl = intel_de_read(dev_priv, SPLL_CTL); 9977 9978 if ((ctl & SPLL_PLL_ENABLE) == 0) 9979 return false; 9980 9981 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && 9982 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9983 return true; 9984 9985 if (IS_BROADWELL(dev_priv) && 9986 (ctl 
& SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) 9987 return true; 9988 9989 return false; 9990 } 9991 9992 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, 9993 enum intel_dpll_id id) 9994 { 9995 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); 9996 u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id)); 9997 9998 if ((ctl & WRPLL_PLL_ENABLE) == 0) 9999 return false; 10000 10001 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) 10002 return true; 10003 10004 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && 10005 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && 10006 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 10007 return true; 10008 10009 return false; 10010 } 10011 10012 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) 10013 { 10014 struct intel_encoder *encoder; 10015 bool has_fdi = false; 10016 10017 for_each_intel_encoder(&dev_priv->drm, encoder) { 10018 switch (encoder->type) { 10019 case INTEL_OUTPUT_ANALOG: 10020 has_fdi = true; 10021 break; 10022 default: 10023 break; 10024 } 10025 } 10026 10027 /* 10028 * The BIOS may have decided to use the PCH SSC 10029 * reference so we must not disable it until the 10030 * relevant PLLs have stopped relying on it. We'll 10031 * just leave the PCH SSC reference enabled in case 10032 * any active PLL is using it. It will get disabled 10033 * after runtime suspend if we don't have FDI. 10034 * 10035 * TODO: Move the whole reference clock handling 10036 * to the modeset sequence proper so that we can 10037 * actually enable/disable/reconfigure these things 10038 * safely. To do that we need to introduce a real 10039 * clock hierarchy. That would also allow us to do 10040 * clock bending finally. 10041 */ 10042 dev_priv->pch_ssc_use = 0; 10043 10044 if (spll_uses_pch_ssc(dev_priv)) { 10045 drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n"); 10046 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); 10047 } 10048 10049 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { 10050 drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n"); 10051 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); 10052 } 10053 10054 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { 10055 drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n"); 10056 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); 10057 } 10058 10059 if (dev_priv->pch_ssc_use) 10060 return; 10061 10062 if (has_fdi) { 10063 lpt_bend_clkout_dp(dev_priv, 0); 10064 lpt_enable_clkout_dp(dev_priv, true, true); 10065 } else { 10066 lpt_disable_clkout_dp(dev_priv); 10067 } 10068 } 10069 10070 /* 10071 * Initialize reference clocks when the driver loads 10072 */ 10073 void intel_init_pch_refclk(struct drm_i915_private *dev_priv) 10074 { 10075 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 10076 ilk_init_pch_refclk(dev_priv); 10077 else if (HAS_PCH_LPT(dev_priv)) 10078 lpt_init_pch_refclk(dev_priv); 10079 } 10080 10081 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) 10082 { 10083 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 10084 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10085 enum pipe pipe = crtc->pipe; 10086 u32 val; 10087 10088 val = 0; 10089 10090 switch (crtc_state->pipe_bpp) { 10091 case 18: 10092 val |= PIPECONF_6BPC; 10093 break; 10094 case 24: 10095 val |= PIPECONF_8BPC; 10096 break; 10097 case 30: 10098 val |= PIPECONF_10BPC; 10099 break; 10100 case 36: 10101 val |= PIPECONF_12BPC; 10102 break; 10103 default: 10104 /* Case prevented by intel_choose_pipe_bpp_dither. 
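* Only 18/24/30/36 bpp can reach this switch, so anything
* else indicates a driver bug.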
*/ 10105 BUG(); 10106 } 10107 10108 if (crtc_state->dither) 10109 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 10110 10111 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 10112 val |= PIPECONF_INTERLACED_ILK; 10113 else 10114 val |= PIPECONF_PROGRESSIVE; 10115 10116 /* 10117 * This would end up with an odd purple hue over 10118 * the entire display. Make sure we don't do it. 10119 */ 10120 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && 10121 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 10122 10123 if (crtc_state->limited_color_range && 10124 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 10125 val |= PIPECONF_COLOR_RANGE_SELECT; 10126 10127 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 10128 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709; 10129 10130 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 10131 10132 val |= PIPECONF_FRAME_START_DELAY(0); 10133 10134 intel_de_write(dev_priv, PIPECONF(pipe), val); 10135 intel_de_posting_read(dev_priv, PIPECONF(pipe)); 10136 } 10137 10138 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state) 10139 { 10140 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 10141 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10142 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 10143 u32 val = 0; 10144 10145 if (IS_HASWELL(dev_priv) && crtc_state->dither) 10146 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 10147 10148 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 10149 val |= PIPECONF_INTERLACED_ILK; 10150 else 10151 val |= PIPECONF_PROGRESSIVE; 10152 10153 if (IS_HASWELL(dev_priv) && 10154 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 10155 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; 10156 10157 intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val); 10158 intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder)); 10159 } 10160 10161 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) 10162 { 10163 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 10164 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10165 u32 val = 0; 10166 10167 switch (crtc_state->pipe_bpp) { 10168 case 18: 10169 val |= PIPEMISC_DITHER_6_BPC; 10170 break; 10171 case 24: 10172 val |= PIPEMISC_DITHER_8_BPC; 10173 break; 10174 case 30: 10175 val |= PIPEMISC_DITHER_10_BPC; 10176 break; 10177 case 36: 10178 val |= PIPEMISC_DITHER_12_BPC; 10179 break; 10180 default: 10181 MISSING_CASE(crtc_state->pipe_bpp); 10182 break; 10183 } 10184 10185 if (crtc_state->dither) 10186 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 10187 10188 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 10189 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 10190 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; 10191 10192 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 10193 val |= PIPEMISC_YUV420_ENABLE | 10194 PIPEMISC_YUV420_MODE_FULL_BLEND; 10195 10196 if (INTEL_GEN(dev_priv) >= 11 && 10197 (crtc_state->active_planes & ~(icl_hdr_plane_mask() | 10198 BIT(PLANE_CURSOR))) == 0) 10199 val |= PIPEMISC_HDR_MODE_PRECISION; 10200 10201 if (INTEL_GEN(dev_priv) >= 12) 10202 val |= PIPEMISC_PIXEL_ROUNDING_TRUNC; 10203 10204 intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val); 10205 } 10206 10207 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) 10208 { 10209 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10210 u32 tmp; 10211 10212 tmp = intel_de_read(dev_priv, 
PIPEMISC(crtc->pipe)); 10213 10214 switch (tmp & PIPEMISC_DITHER_BPC_MASK) { 10215 case PIPEMISC_DITHER_6_BPC: 10216 return 18; 10217 case PIPEMISC_DITHER_8_BPC: 10218 return 24; 10219 case PIPEMISC_DITHER_10_BPC: 10220 return 30; 10221 case PIPEMISC_DITHER_12_BPC: 10222 return 36; 10223 default: 10224 MISSING_CASE(tmp); 10225 return 0; 10226 } 10227 } 10228 10229 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) 10230 { 10231 /* 10232 * Account for spread spectrum to avoid 10233 * oversubscribing the link. Max center spread 10234 * is 2.5%; use 5% for safety's sake. 10235 */ 10236 u32 bps = target_clock * bpp * 21 / 20; 10237 return DIV_ROUND_UP(bps, link_bw * 8); 10238 } 10239 10240 static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor) 10241 { 10242 return i9xx_dpll_compute_m(dpll) < factor * dpll->n; 10243 } 10244 10245 static void ilk_compute_dpll(struct intel_crtc *crtc, 10246 struct intel_crtc_state *crtc_state, 10247 struct dpll *reduced_clock) 10248 { 10249 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10250 u32 dpll, fp, fp2; 10251 int factor; 10252 10253 /* Enable autotuning of the PLL clock (if permissible) */ 10254 factor = 21; 10255 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 10256 if ((intel_panel_use_ssc(dev_priv) && 10257 dev_priv->vbt.lvds_ssc_freq == 100000) || 10258 (HAS_PCH_IBX(dev_priv) && 10259 intel_is_dual_link_lvds(dev_priv))) 10260 factor = 25; 10261 } else if (crtc_state->sdvo_tv_clock) { 10262 factor = 20; 10263 } 10264 10265 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 10266 10267 if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor)) 10268 fp |= FP_CB_TUNE; 10269 10270 if (reduced_clock) { 10271 fp2 = i9xx_dpll_compute_fp(reduced_clock); 10272 10273 if (reduced_clock->m < factor * reduced_clock->n) 10274 fp2 |= FP_CB_TUNE; 10275 } else { 10276 fp2 = fp; 10277 } 10278 10279 dpll = 0; 10280 10281 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 10282 dpll |= DPLLB_MODE_LVDS; 10283 else 10284 dpll |= DPLLB_MODE_DAC_SERIAL; 10285 10286 dpll |= (crtc_state->pixel_multiplier - 1) 10287 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 10288 10289 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 10290 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 10291 dpll |= DPLL_SDVO_HIGH_SPEED; 10292 10293 if (intel_crtc_has_dp_encoder(crtc_state)) 10294 dpll |= DPLL_SDVO_HIGH_SPEED; 10295 10296 /* 10297 * The high speed IO clock is only really required for 10298 * SDVO/HDMI/DP, but we also enable it for CRT to make it 10299 * possible to share the DPLL between CRT and HDMI. Enabling 10300 * the clock needlessly does no real harm, except use up a 10301 * bit of power potentially. 10302 * 10303 * We'll limit this to IVB with 3 pipes, since it has only two 10304 * DPLLs and so DPLL sharing is the only way to get three pipes 10305 * driving PCH ports at the same time. On SNB we could do this, 10306 * and potentially avoid enabling the second DPLL, but it's not 10307 * clear if it's a win or loss power-wise. No point in doing 10308 * this on ILK at all since it has a fixed DPLL<->pipe mapping. 
10309 */ 10310 if (INTEL_NUM_PIPES(dev_priv) == 3 && 10311 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) 10312 dpll |= DPLL_SDVO_HIGH_SPEED; 10313 10314 /* compute bitmask from p1 value */ 10315 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 10316 /* also FPA1 */ 10317 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 10318 10319 switch (crtc_state->dpll.p2) { 10320 case 5: 10321 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 10322 break; 10323 case 7: 10324 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 10325 break; 10326 case 10: 10327 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 10328 break; 10329 case 14: 10330 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 10331 break; 10332 } 10333 10334 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 10335 intel_panel_use_ssc(dev_priv)) 10336 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 10337 else 10338 dpll |= PLL_REF_INPUT_DREFCLK; 10339 10340 dpll |= DPLL_VCO_ENABLE; 10341 10342 crtc_state->dpll_hw_state.dpll = dpll; 10343 crtc_state->dpll_hw_state.fp0 = fp; 10344 crtc_state->dpll_hw_state.fp1 = fp2; 10345 } 10346 10347 static int ilk_crtc_compute_clock(struct intel_crtc *crtc, 10348 struct intel_crtc_state *crtc_state) 10349 { 10350 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10351 struct intel_atomic_state *state = 10352 to_intel_atomic_state(crtc_state->uapi.state); 10353 const struct intel_limit *limit; 10354 int refclk = 120000; 10355 10356 memset(&crtc_state->dpll_hw_state, 0, 10357 sizeof(crtc_state->dpll_hw_state)); 10358 10359 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 10360 if (!crtc_state->has_pch_encoder) 10361 return 0; 10362 10363 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 10364 if (intel_panel_use_ssc(dev_priv)) { 10365 drm_dbg_kms(&dev_priv->drm, 10366 "using SSC reference clock of %d kHz\n", 10367 dev_priv->vbt.lvds_ssc_freq); 10368 refclk = dev_priv->vbt.lvds_ssc_freq; 10369 } 10370 10371 if (intel_is_dual_link_lvds(dev_priv)) { 10372 if (refclk == 100000) 10373 limit = &ilk_limits_dual_lvds_100m; 10374 else 10375 limit = &ilk_limits_dual_lvds; 10376 } else { 10377 if (refclk == 100000) 10378 limit = &ilk_limits_single_lvds_100m; 10379 else 10380 limit = &ilk_limits_single_lvds; 10381 } 10382 } else { 10383 limit = &ilk_limits_dac; 10384 } 10385 10386 if (!crtc_state->clock_set && 10387 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 10388 refclk, NULL, &crtc_state->dpll)) { 10389 drm_err(&dev_priv->drm, 10390 "Couldn't find PLL settings for mode!\n"); 10391 return -EINVAL; 10392 } 10393 10394 ilk_compute_dpll(crtc, crtc_state, NULL); 10395 10396 if (!intel_reserve_shared_dplls(state, crtc, NULL)) { 10397 drm_dbg_kms(&dev_priv->drm, 10398 "failed to find PLL for pipe %c\n", 10399 pipe_name(crtc->pipe)); 10400 return -EINVAL; 10401 } 10402 10403 return 0; 10404 } 10405 10406 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 10407 struct intel_link_m_n *m_n) 10408 { 10409 struct drm_device *dev = crtc->base.dev; 10410 struct drm_i915_private *dev_priv = to_i915(dev); 10411 enum pipe pipe = crtc->pipe; 10412 10413 m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe)); 10414 m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe)); 10415 m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe)) 10416 & ~TU_SIZE_MASK; 10417 m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe)); 10418 m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe)) 10419 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 
1; 10420 } 10421 10422 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 10423 enum transcoder transcoder, 10424 struct intel_link_m_n *m_n, 10425 struct intel_link_m_n *m2_n2) 10426 { 10427 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10428 enum pipe pipe = crtc->pipe; 10429 10430 if (INTEL_GEN(dev_priv) >= 5) { 10431 m_n->link_m = intel_de_read(dev_priv, 10432 PIPE_LINK_M1(transcoder)); 10433 m_n->link_n = intel_de_read(dev_priv, 10434 PIPE_LINK_N1(transcoder)); 10435 m_n->gmch_m = intel_de_read(dev_priv, 10436 PIPE_DATA_M1(transcoder)) 10437 & ~TU_SIZE_MASK; 10438 m_n->gmch_n = intel_de_read(dev_priv, 10439 PIPE_DATA_N1(transcoder)); 10440 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder)) 10441 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10442 10443 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { 10444 m2_n2->link_m = intel_de_read(dev_priv, 10445 PIPE_LINK_M2(transcoder)); 10446 m2_n2->link_n = intel_de_read(dev_priv, 10447 PIPE_LINK_N2(transcoder)); 10448 m2_n2->gmch_m = intel_de_read(dev_priv, 10449 PIPE_DATA_M2(transcoder)) 10450 & ~TU_SIZE_MASK; 10451 m2_n2->gmch_n = intel_de_read(dev_priv, 10452 PIPE_DATA_N2(transcoder)); 10453 m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder)) 10454 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10455 } 10456 } else { 10457 m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe)); 10458 m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe)); 10459 m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe)) 10460 & ~TU_SIZE_MASK; 10461 m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe)); 10462 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe)) 10463 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10464 } 10465 } 10466 10467 void intel_dp_get_m_n(struct intel_crtc *crtc, 10468 struct intel_crtc_state *pipe_config) 10469 { 10470 if (pipe_config->has_pch_encoder) 10471 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 10472 else 10473 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 10474 &pipe_config->dp_m_n, 10475 &pipe_config->dp_m2_n2); 10476 } 10477 10478 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, 10479 struct intel_crtc_state *pipe_config) 10480 { 10481 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 10482 &pipe_config->fdi_m_n, NULL); 10483 } 10484 10485 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state, 10486 u32 pos, u32 size) 10487 { 10488 drm_rect_init(&crtc_state->pch_pfit.dst, 10489 pos >> 16, pos & 0xffff, 10490 size >> 16, size & 0xffff); 10491 } 10492 10493 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state) 10494 { 10495 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 10496 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10497 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; 10498 int id = -1; 10499 int i; 10500 10501 /* find scaler attached to this pipe */ 10502 for (i = 0; i < crtc->num_scalers; i++) { 10503 u32 ctl, pos, size; 10504 10505 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i)); 10506 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN) 10507 continue; 10508 10509 id = i; 10510 crtc_state->pch_pfit.enabled = true; 10511 10512 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i)); 10513 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i)); 10514 10515 ilk_get_pfit_pos_size(crtc_state, pos, size); 10516 10517 scaler_state->scalers[i].in_use = true; 10518 break; 10519 } 
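/* Publish the result: record the attached scaler's id (or -1) and
 * set or clear this CRTC's bit in scaler_users to match. */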
10520 10521 scaler_state->scaler_id = id; 10522 if (id >= 0) 10523 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); 10524 else 10525 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); 10526 } 10527 10528 static void 10529 skl_get_initial_plane_config(struct intel_crtc *crtc, 10530 struct intel_initial_plane_config *plane_config) 10531 { 10532 struct drm_device *dev = crtc->base.dev; 10533 struct drm_i915_private *dev_priv = to_i915(dev); 10534 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 10535 enum plane_id plane_id = plane->id; 10536 enum pipe pipe; 10537 u32 val, base, offset, stride_mult, tiling, alpha; 10538 int fourcc, pixel_format; 10539 unsigned int aligned_height; 10540 struct drm_framebuffer *fb; 10541 struct intel_framebuffer *intel_fb; 10542 10543 if (!plane->get_hw_state(plane, &pipe)) 10544 return; 10545 10546 drm_WARN_ON(dev, pipe != crtc->pipe); 10547 10548 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 10549 if (!intel_fb) { 10550 drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n"); 10551 return; 10552 } 10553 10554 fb = &intel_fb->base; 10555 10556 fb->dev = dev; 10557 10558 val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id)); 10559 10560 if (INTEL_GEN(dev_priv) >= 11) 10561 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK; 10562 else 10563 pixel_format = val & PLANE_CTL_FORMAT_MASK; 10564 10565 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { 10566 alpha = intel_de_read(dev_priv, 10567 PLANE_COLOR_CTL(pipe, plane_id)); 10568 alpha &= PLANE_COLOR_ALPHA_MASK; 10569 } else { 10570 alpha = val & PLANE_CTL_ALPHA_MASK; 10571 } 10572 10573 fourcc = skl_format_to_fourcc(pixel_format, 10574 val & PLANE_CTL_ORDER_RGBX, alpha); 10575 fb->format = drm_format_info(fourcc); 10576 10577 tiling = val & PLANE_CTL_TILED_MASK; 10578 switch (tiling) { 10579 case PLANE_CTL_TILED_LINEAR: 10580 fb->modifier = DRM_FORMAT_MOD_LINEAR; 10581 break; 10582 case PLANE_CTL_TILED_X: 10583 plane_config->tiling = I915_TILING_X; 10584 fb->modifier = I915_FORMAT_MOD_X_TILED; 10585 break; 10586 case PLANE_CTL_TILED_Y: 10587 plane_config->tiling = I915_TILING_Y; 10588 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) 10589 fb->modifier = INTEL_GEN(dev_priv) >= 12 ? 10590 I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS : 10591 I915_FORMAT_MOD_Y_TILED_CCS; 10592 else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE) 10593 fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; 10594 else 10595 fb->modifier = I915_FORMAT_MOD_Y_TILED; 10596 break; 10597 case PLANE_CTL_TILED_YF: 10598 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) 10599 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS; 10600 else 10601 fb->modifier = I915_FORMAT_MOD_Yf_TILED; 10602 break; 10603 default: 10604 MISSING_CASE(tiling); 10605 goto error; 10606 } 10607 10608 /* 10609 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr 10610 * while i915 HW rotation is clockwise; that's why we swap them. 
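* E.g. a plane the hardware reports as ROTATE_90 is exposed as
* DRM_MODE_ROTATE_270 in the switch below, and vice versa.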
*/ 10612 switch (val & PLANE_CTL_ROTATE_MASK) { 10613 case PLANE_CTL_ROTATE_0: 10614 plane_config->rotation = DRM_MODE_ROTATE_0; 10615 break; 10616 case PLANE_CTL_ROTATE_90: 10617 plane_config->rotation = DRM_MODE_ROTATE_270; 10618 break; 10619 case PLANE_CTL_ROTATE_180: 10620 plane_config->rotation = DRM_MODE_ROTATE_180; 10621 break; 10622 case PLANE_CTL_ROTATE_270: 10623 plane_config->rotation = DRM_MODE_ROTATE_90; 10624 break; 10625 } 10626 10627 if (INTEL_GEN(dev_priv) >= 10 && 10628 val & PLANE_CTL_FLIP_HORIZONTAL) 10629 plane_config->rotation |= DRM_MODE_REFLECT_X; 10630 10631 base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000; 10632 plane_config->base = base; 10633 10634 offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id)); 10635 10636 val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id)); 10637 fb->height = ((val >> 16) & 0xffff) + 1; 10638 fb->width = ((val >> 0) & 0xffff) + 1; 10639 10640 val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id)); 10641 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); 10642 fb->pitches[0] = (val & 0x3ff) * stride_mult; 10643 10644 aligned_height = intel_fb_align_height(fb, 0, fb->height); 10645 10646 plane_config->size = fb->pitches[0] * aligned_height; 10647 10648 drm_dbg_kms(&dev_priv->drm, 10649 "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 10650 crtc->base.name, plane->base.name, fb->width, fb->height, 10651 fb->format->cpp[0] * 8, base, fb->pitches[0], 10652 plane_config->size); 10653 10654 plane_config->fb = intel_fb; 10655 return; 10656 10657 error: 10658 kfree(intel_fb); 10659 } 10660 10661 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state) 10662 { 10663 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 10664 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10665 u32 ctl, pos, size; 10666 10667 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe)); 10668 if ((ctl & PF_ENABLE) == 0) 10669 return; 10670 10671 crtc_state->pch_pfit.enabled = true; 10672 10673 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe)); 10674 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe)); 10675 10676 ilk_get_pfit_pos_size(crtc_state, pos, size); 10677 10678 /* 10679 * We currently do not free assignments of panel fitters on 10680 * ivb/hsw (since we don't use the higher upscaling modes which 10681 * differentiate them) so just WARN about this case for now. 
10682 */ 10683 drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) && 10684 (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe)); 10685 } 10686 10687 static bool ilk_get_pipe_config(struct intel_crtc *crtc, 10688 struct intel_crtc_state *pipe_config) 10689 { 10690 struct drm_device *dev = crtc->base.dev; 10691 struct drm_i915_private *dev_priv = to_i915(dev); 10692 enum intel_display_power_domain power_domain; 10693 intel_wakeref_t wakeref; 10694 u32 tmp; 10695 bool ret; 10696 10697 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 10698 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 10699 if (!wakeref) 10700 return false; 10701 10702 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10703 pipe_config->shared_dpll = NULL; 10704 10705 ret = false; 10706 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); 10707 if (!(tmp & PIPECONF_ENABLE)) 10708 goto out; 10709 10710 switch (tmp & PIPECONF_BPC_MASK) { 10711 case PIPECONF_6BPC: 10712 pipe_config->pipe_bpp = 18; 10713 break; 10714 case PIPECONF_8BPC: 10715 pipe_config->pipe_bpp = 24; 10716 break; 10717 case PIPECONF_10BPC: 10718 pipe_config->pipe_bpp = 30; 10719 break; 10720 case PIPECONF_12BPC: 10721 pipe_config->pipe_bpp = 36; 10722 break; 10723 default: 10724 break; 10725 } 10726 10727 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 10728 pipe_config->limited_color_range = true; 10729 10730 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) { 10731 case PIPECONF_OUTPUT_COLORSPACE_YUV601: 10732 case PIPECONF_OUTPUT_COLORSPACE_YUV709: 10733 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 10734 break; 10735 default: 10736 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 10737 break; 10738 } 10739 10740 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> 10741 PIPECONF_GAMMA_MODE_SHIFT; 10742 10743 pipe_config->csc_mode = intel_de_read(dev_priv, 10744 PIPE_CSC_MODE(crtc->pipe)); 10745 10746 i9xx_get_pipe_color_config(pipe_config); 10747 intel_color_get_config(pipe_config); 10748 10749 if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 10750 struct intel_shared_dpll *pll; 10751 enum intel_dpll_id pll_id; 10752 10753 pipe_config->has_pch_encoder = true; 10754 10755 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe)); 10756 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 10757 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10758 10759 ilk_get_fdi_m_n_config(crtc, pipe_config); 10760 10761 if (HAS_PCH_IBX(dev_priv)) { 10762 /* 10763 * The pipe->pch transcoder and pch transcoder->pll 10764 * mapping is fixed. 
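* so on IBX the DPLL id follows directly from the pipe. CPT and
* later PCHs instead report the selection in PCH_DPLL_SEL, which
* the else branch below decodes.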
10765 */ 10766 pll_id = (enum intel_dpll_id) crtc->pipe; 10767 } else { 10768 tmp = intel_de_read(dev_priv, PCH_DPLL_SEL); 10769 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 10770 pll_id = DPLL_ID_PCH_PLL_B; 10771 else 10772 pll_id = DPLL_ID_PCH_PLL_A; 10773 } 10774 10775 pipe_config->shared_dpll = 10776 intel_get_shared_dpll_by_id(dev_priv, pll_id); 10777 pll = pipe_config->shared_dpll; 10778 10779 drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll, 10780 &pipe_config->dpll_hw_state)); 10781 10782 tmp = pipe_config->dpll_hw_state.dpll; 10783 pipe_config->pixel_multiplier = 10784 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 10785 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 10786 10787 ilk_pch_clock_get(crtc, pipe_config); 10788 } else { 10789 pipe_config->pixel_multiplier = 1; 10790 } 10791 10792 intel_get_pipe_timings(crtc, pipe_config); 10793 intel_get_pipe_src_size(crtc, pipe_config); 10794 10795 ilk_get_pfit_config(pipe_config); 10796 10797 ret = true; 10798 10799 out: 10800 intel_display_power_put(dev_priv, power_domain, wakeref); 10801 10802 return ret; 10803 } 10804 10805 static int hsw_crtc_compute_clock(struct intel_crtc *crtc, 10806 struct intel_crtc_state *crtc_state) 10807 { 10808 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10809 struct intel_atomic_state *state = 10810 to_intel_atomic_state(crtc_state->uapi.state); 10811 10812 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) || 10813 INTEL_GEN(dev_priv) >= 11) { 10814 struct intel_encoder *encoder = 10815 intel_get_crtc_new_encoder(state, crtc_state); 10816 10817 if (!intel_reserve_shared_dplls(state, crtc, encoder)) { 10818 drm_dbg_kms(&dev_priv->drm, 10819 "failed to find PLL for pipe %c\n", 10820 pipe_name(crtc->pipe)); 10821 return -EINVAL; 10822 } 10823 } 10824 10825 return 0; 10826 } 10827 10828 static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, 10829 struct intel_crtc_state *pipe_config) 10830 { 10831 enum intel_dpll_id id; 10832 u32 temp; 10833 10834 temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); 10835 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); 10836 10837 if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2)) 10838 return; 10839 10840 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10841 } 10842 10843 static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, 10844 struct intel_crtc_state *pipe_config) 10845 { 10846 enum phy phy = intel_port_to_phy(dev_priv, port); 10847 enum icl_port_dpll_id port_dpll_id; 10848 enum intel_dpll_id id; 10849 u32 temp; 10850 10851 if (intel_phy_is_combo(dev_priv, phy)) { 10852 u32 mask, shift; 10853 10854 if (IS_ROCKETLAKE(dev_priv)) { 10855 mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); 10856 shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); 10857 } else { 10858 mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); 10859 shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); 10860 } 10861 10862 temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & mask; 10863 id = temp >> shift; 10864 port_dpll_id = ICL_PORT_DPLL_DEFAULT; 10865 } else if (intel_phy_is_tc(dev_priv, phy)) { 10866 u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; 10867 10868 if (clk_sel == DDI_CLK_SEL_MG) { 10869 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, 10870 port)); 10871 port_dpll_id = ICL_PORT_DPLL_MG_PHY; 10872 } else { 10873 drm_WARN_ON(&dev_priv->drm, 10874 clk_sel < DDI_CLK_SEL_TBT_162); 10875 id = DPLL_ID_ICL_TBTPLL; 10876
port_dpll_id = ICL_PORT_DPLL_DEFAULT; 10877 } 10878 } else { 10879 drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port); 10880 return; 10881 } 10882 10883 pipe_config->icl_port_dplls[port_dpll_id].pll = 10884 intel_get_shared_dpll_by_id(dev_priv, id); 10885 10886 icl_set_active_port_dpll(pipe_config, port_dpll_id); 10887 } 10888 10889 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 10890 enum port port, 10891 struct intel_crtc_state *pipe_config) 10892 { 10893 enum intel_dpll_id id; 10894 10895 switch (port) { 10896 case PORT_A: 10897 id = DPLL_ID_SKL_DPLL0; 10898 break; 10899 case PORT_B: 10900 id = DPLL_ID_SKL_DPLL1; 10901 break; 10902 case PORT_C: 10903 id = DPLL_ID_SKL_DPLL2; 10904 break; 10905 default: 10906 drm_err(&dev_priv->drm, "Incorrect port type\n"); 10907 return; 10908 } 10909 10910 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10911 } 10912 10913 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, 10914 struct intel_crtc_state *pipe_config) 10915 { 10916 enum intel_dpll_id id; 10917 u32 temp; 10918 10919 temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 10920 id = temp >> (port * 3 + 1); 10921 10922 if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3)) 10923 return; 10924 10925 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10926 } 10927 10928 static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, 10929 struct intel_crtc_state *pipe_config) 10930 { 10931 enum intel_dpll_id id; 10932 u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port)); 10933 10934 switch (ddi_pll_sel) { 10935 case PORT_CLK_SEL_WRPLL1: 10936 id = DPLL_ID_WRPLL1; 10937 break; 10938 case PORT_CLK_SEL_WRPLL2: 10939 id = DPLL_ID_WRPLL2; 10940 break; 10941 case PORT_CLK_SEL_SPLL: 10942 id = DPLL_ID_SPLL; 10943 break; 10944 case PORT_CLK_SEL_LCPLL_810: 10945 id = DPLL_ID_LCPLL_810; 10946 break; 10947 case PORT_CLK_SEL_LCPLL_1350: 10948 id = DPLL_ID_LCPLL_1350; 10949 break; 10950 case PORT_CLK_SEL_LCPLL_2700: 10951 id = DPLL_ID_LCPLL_2700; 10952 break; 10953 default: 10954 MISSING_CASE(ddi_pll_sel); 10955 fallthrough; 10956 case PORT_CLK_SEL_NONE: 10957 return; 10958 } 10959 10960 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10961 } 10962 10963 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 10964 struct intel_crtc_state *pipe_config, 10965 u64 *power_domain_mask, 10966 intel_wakeref_t *wakerefs) 10967 { 10968 struct drm_device *dev = crtc->base.dev; 10969 struct drm_i915_private *dev_priv = to_i915(dev); 10970 enum intel_display_power_domain power_domain; 10971 unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP); 10972 unsigned long enabled_panel_transcoders = 0; 10973 enum transcoder panel_transcoder; 10974 intel_wakeref_t wf; 10975 u32 tmp; 10976 10977 if (INTEL_GEN(dev_priv) >= 11) 10978 panel_transcoder_mask |= 10979 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 10980 10981 /* 10982 * The pipe->transcoder mapping is fixed with the exception of the eDP 10983 * and DSI transcoders handled below. 10984 */ 10985 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10986 10987 /* 10988 * XXX: Do intel_display_power_get_if_enabled before reading this (for 10989 * consistency and less surprising code; it's in always on power). 
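 * (The readout loop below still works without it since these registers live in an always-on power well, as noted above; the XXX is purely about consistency.)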
10990 */ 10991 for_each_cpu_transcoder_masked(dev_priv, panel_transcoder, 10992 panel_transcoder_mask) { 10993 bool force_thru = false; 10994 enum pipe trans_pipe; 10995 10996 tmp = intel_de_read(dev_priv, 10997 TRANS_DDI_FUNC_CTL(panel_transcoder)); 10998 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 10999 continue; 11000 11001 /* 11002 * Log all enabled ones, only use the first one. 11003 * 11004 * FIXME: This won't work for two separate DSI displays. 11005 */ 11006 enabled_panel_transcoders |= BIT(panel_transcoder); 11007 if (enabled_panel_transcoders != BIT(panel_transcoder)) 11008 continue; 11009 11010 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 11011 default: 11012 drm_WARN(dev, 1, 11013 "unknown pipe linked to transcoder %s\n", 11014 transcoder_name(panel_transcoder)); 11015 fallthrough; 11016 case TRANS_DDI_EDP_INPUT_A_ONOFF: 11017 force_thru = true; 11018 fallthrough; 11019 case TRANS_DDI_EDP_INPUT_A_ON: 11020 trans_pipe = PIPE_A; 11021 break; 11022 case TRANS_DDI_EDP_INPUT_B_ONOFF: 11023 trans_pipe = PIPE_B; 11024 break; 11025 case TRANS_DDI_EDP_INPUT_C_ONOFF: 11026 trans_pipe = PIPE_C; 11027 break; 11028 case TRANS_DDI_EDP_INPUT_D_ONOFF: 11029 trans_pipe = PIPE_D; 11030 break; 11031 } 11032 11033 if (trans_pipe == crtc->pipe) { 11034 pipe_config->cpu_transcoder = panel_transcoder; 11035 pipe_config->pch_pfit.force_thru = force_thru; 11036 } 11037 } 11038 11039 /* 11040 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1 11041 */ 11042 drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && 11043 enabled_panel_transcoders != BIT(TRANSCODER_EDP)); 11044 11045 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 11046 drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain)); 11047 11048 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 11049 if (!wf) 11050 return false; 11051 11052 wakerefs[power_domain] = wf; 11053 *power_domain_mask |= BIT_ULL(power_domain); 11054 11055 tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder)); 11056 11057 return tmp & PIPECONF_ENABLE; 11058 } 11059 11060 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 11061 struct intel_crtc_state *pipe_config, 11062 u64 *power_domain_mask, 11063 intel_wakeref_t *wakerefs) 11064 { 11065 struct drm_device *dev = crtc->base.dev; 11066 struct drm_i915_private *dev_priv = to_i915(dev); 11067 enum intel_display_power_domain power_domain; 11068 enum transcoder cpu_transcoder; 11069 intel_wakeref_t wf; 11070 enum port port; 11071 u32 tmp; 11072 11073 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 11074 if (port == PORT_A) 11075 cpu_transcoder = TRANSCODER_DSI_A; 11076 else 11077 cpu_transcoder = TRANSCODER_DSI_C; 11078 11079 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 11080 drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain)); 11081 11082 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 11083 if (!wf) 11084 continue; 11085 11086 wakerefs[power_domain] = wf; 11087 *power_domain_mask |= BIT_ULL(power_domain); 11088 11089 /* 11090 * The PLL needs to be enabled with a valid divider 11091 * configuration, otherwise accessing DSI registers will hang 11092 * the machine. See BSpec North Display Engine 11093 * registers/MIPI[BXT]. We can break out here early, since we 11094 * need the same DSI PLL to be enabled for both DSI ports. 
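 * (Hence the unconditional break below when bxt_dsi_pll_is_enabled() says the PLL is off: no MIPI register may be touched in that state.)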
11095 */ 11096 if (!bxt_dsi_pll_is_enabled(dev_priv)) 11097 break; 11098 11099 /* XXX: this works for video mode only */ 11100 tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)); 11101 if (!(tmp & DPI_ENABLE)) 11102 continue; 11103 11104 tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); 11105 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 11106 continue; 11107 11108 pipe_config->cpu_transcoder = cpu_transcoder; 11109 break; 11110 } 11111 11112 return transcoder_is_dsi(pipe_config->cpu_transcoder); 11113 } 11114 11115 static void hsw_get_ddi_port_state(struct intel_crtc *crtc, 11116 struct intel_crtc_state *pipe_config) 11117 { 11118 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11119 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 11120 struct intel_shared_dpll *pll; 11121 enum port port; 11122 u32 tmp; 11123 11124 if (transcoder_is_dsi(cpu_transcoder)) { 11125 port = (cpu_transcoder == TRANSCODER_DSI_A) ? 11126 PORT_A : PORT_B; 11127 } else { 11128 tmp = intel_de_read(dev_priv, 11129 TRANS_DDI_FUNC_CTL(cpu_transcoder)); 11130 if (INTEL_GEN(dev_priv) >= 12) 11131 port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 11132 else 11133 port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 11134 } 11135 11136 if (INTEL_GEN(dev_priv) >= 11) 11137 icl_get_ddi_pll(dev_priv, port, pipe_config); 11138 else if (IS_CANNONLAKE(dev_priv)) 11139 cnl_get_ddi_pll(dev_priv, port, pipe_config); 11140 else if (IS_GEN9_BC(dev_priv)) 11141 skl_get_ddi_pll(dev_priv, port, pipe_config); 11142 else if (IS_GEN9_LP(dev_priv)) 11143 bxt_get_ddi_pll(dev_priv, port, pipe_config); 11144 else 11145 hsw_get_ddi_pll(dev_priv, port, pipe_config); 11146 11147 pll = pipe_config->shared_dpll; 11148 if (pll) { 11149 drm_WARN_ON(&dev_priv->drm, 11150 !pll->info->funcs->get_hw_state(dev_priv, pll, 11151 &pipe_config->dpll_hw_state)); 11152 } 11153 11154 /* 11155 * Haswell has only one FDI/PCH transcoder (transcoder A), which is connected to 11156 * DDI E. So just check whether this pipe is wired to DDI E and whether 11157 * the PCH transcoder is on.
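 * (Only HSW/BDW can get here with FDI; gen9+ dropped FDI entirely, hence the INTEL_GEN < 9 guard that follows.)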
11158 */ 11159 if (INTEL_GEN(dev_priv) < 9 && 11160 (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) { 11161 pipe_config->has_pch_encoder = true; 11162 11163 tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); 11164 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 11165 FDI_DP_PORT_WIDTH_SHIFT) + 1; 11166 11167 ilk_get_fdi_m_n_config(crtc, pipe_config); 11168 } 11169 } 11170 11171 static bool hsw_get_pipe_config(struct intel_crtc *crtc, 11172 struct intel_crtc_state *pipe_config) 11173 { 11174 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11175 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf; 11176 enum intel_display_power_domain power_domain; 11177 u64 power_domain_mask; 11178 bool active; 11179 u32 tmp; 11180 11181 pipe_config->master_transcoder = INVALID_TRANSCODER; 11182 11183 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 11184 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 11185 if (!wf) 11186 return false; 11187 11188 wakerefs[power_domain] = wf; 11189 power_domain_mask = BIT_ULL(power_domain); 11190 11191 pipe_config->shared_dpll = NULL; 11192 11193 active = hsw_get_transcoder_state(crtc, pipe_config, 11194 &power_domain_mask, wakerefs); 11195 11196 if (IS_GEN9_LP(dev_priv) && 11197 bxt_get_dsi_transcoder_state(crtc, pipe_config, 11198 &power_domain_mask, wakerefs)) { 11199 drm_WARN_ON(&dev_priv->drm, active); 11200 active = true; 11201 } 11202 11203 if (!active) 11204 goto out; 11205 11206 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 11207 INTEL_GEN(dev_priv) >= 11) { 11208 hsw_get_ddi_port_state(crtc, pipe_config); 11209 intel_get_pipe_timings(crtc, pipe_config); 11210 } 11211 11212 intel_get_pipe_src_size(crtc, pipe_config); 11213 11214 if (IS_HASWELL(dev_priv)) { 11215 u32 tmp = intel_de_read(dev_priv, 11216 PIPECONF(pipe_config->cpu_transcoder)); 11217 11218 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) 11219 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 11220 else 11221 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 11222 } else { 11223 pipe_config->output_format = 11224 bdw_get_pipemisc_output_format(crtc); 11225 11226 /* 11227 * Currently there is no interface defined to 11228 * check user preference between RGB/YCBCR444 11229 * or YCBCR420. So the only possible case for 11230 * YCBCR444 usage is driving YCBCR420 output 11231 * with LSPCON, when pipe is configured for 11232 * YCBCR444 output and LSPCON takes care of 11233 * downsampling it. 
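 * (That is what the lspcon_downsampling assignment below records: a pipe reading back as YCBCR444 implies LSPCON is doing the 444-to-420 conversion.)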
11234 */ 11235 pipe_config->lspcon_downsampling = 11236 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444; 11237 } 11238 11239 pipe_config->gamma_mode = intel_de_read(dev_priv, 11240 GAMMA_MODE(crtc->pipe)); 11241 11242 pipe_config->csc_mode = intel_de_read(dev_priv, 11243 PIPE_CSC_MODE(crtc->pipe)); 11244 11245 if (INTEL_GEN(dev_priv) >= 9) { 11246 tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe)); 11247 11248 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) 11249 pipe_config->gamma_enable = true; 11250 11251 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) 11252 pipe_config->csc_enable = true; 11253 } else { 11254 i9xx_get_pipe_color_config(pipe_config); 11255 } 11256 11257 intel_color_get_config(pipe_config); 11258 11259 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe)); 11260 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp); 11261 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 11262 pipe_config->ips_linetime = 11263 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); 11264 11265 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 11266 drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain)); 11267 11268 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 11269 if (wf) { 11270 wakerefs[power_domain] = wf; 11271 power_domain_mask |= BIT_ULL(power_domain); 11272 11273 if (INTEL_GEN(dev_priv) >= 9) 11274 skl_get_pfit_config(pipe_config); 11275 else 11276 ilk_get_pfit_config(pipe_config); 11277 } 11278 11279 if (hsw_crtc_supports_ips(crtc)) { 11280 if (IS_HASWELL(dev_priv)) 11281 pipe_config->ips_enabled = intel_de_read(dev_priv, 11282 IPS_CTL) & IPS_ENABLE; 11283 else { 11284 /* 11285 * We cannot read out the IPS state on Broadwell, so set 11286 * this to true so we can force it to a defined state on 11287 * the first commit.
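 * (Broadwell toggles IPS through the pcode mailbox rather than the IPS_CTL register used on Haswell, so there is no enable bit to read back.)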
11288 */ 11289 pipe_config->ips_enabled = true; 11290 } 11291 } 11292 11293 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 11294 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 11295 pipe_config->pixel_multiplier = 11296 intel_de_read(dev_priv, 11297 PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 11298 } else { 11299 pipe_config->pixel_multiplier = 1; 11300 } 11301 11302 out: 11303 for_each_power_domain(power_domain, power_domain_mask) 11304 intel_display_power_put(dev_priv, 11305 power_domain, wakerefs[power_domain]); 11306 11307 return active; 11308 } 11309 11310 static u32 intel_cursor_base(const struct intel_plane_state *plane_state) 11311 { 11312 struct drm_i915_private *dev_priv = 11313 to_i915(plane_state->uapi.plane->dev); 11314 const struct drm_framebuffer *fb = plane_state->hw.fb; 11315 const struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11316 u32 base; 11317 11318 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) 11319 base = sg_dma_address(obj->mm.pages->sgl); 11320 else 11321 base = intel_plane_ggtt_offset(plane_state); 11322 11323 return base + plane_state->color_plane[0].offset; 11324 } 11325 11326 static u32 intel_cursor_position(const struct intel_plane_state *plane_state) 11327 { 11328 int x = plane_state->uapi.dst.x1; 11329 int y = plane_state->uapi.dst.y1; 11330 u32 pos = 0; 11331 11332 if (x < 0) { 11333 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 11334 x = -x; 11335 } 11336 pos |= x << CURSOR_X_SHIFT; 11337 11338 if (y < 0) { 11339 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 11340 y = -y; 11341 } 11342 pos |= y << CURSOR_Y_SHIFT; 11343 11344 return pos; 11345 } 11346 11347 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) 11348 { 11349 const struct drm_mode_config *config = 11350 &plane_state->uapi.plane->dev->mode_config; 11351 int width = drm_rect_width(&plane_state->uapi.dst); 11352 int height = drm_rect_height(&plane_state->uapi.dst); 11353 11354 return width > 0 && width <= config->cursor_width && 11355 height > 0 && height <= config->cursor_height; 11356 } 11357 11358 static int intel_cursor_check_surface(struct intel_plane_state *plane_state) 11359 { 11360 struct drm_i915_private *dev_priv = 11361 to_i915(plane_state->uapi.plane->dev); 11362 unsigned int rotation = plane_state->hw.rotation; 11363 int src_x, src_y; 11364 u32 offset; 11365 int ret; 11366 11367 ret = intel_plane_compute_gtt(plane_state); 11368 if (ret) 11369 return ret; 11370 11371 if (!plane_state->uapi.visible) 11372 return 0; 11373 11374 src_x = plane_state->uapi.src.x1 >> 16; 11375 src_y = plane_state->uapi.src.y1 >> 16; 11376 11377 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 11378 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 11379 plane_state, 0); 11380 11381 if (src_x != 0 || src_y != 0) { 11382 drm_dbg_kms(&dev_priv->drm, 11383 "Arbitrary cursor panning not supported\n"); 11384 return -EINVAL; 11385 } 11386 11387 /* 11388 * Put the final coordinates back so that the src 11389 * coordinate checks will see the right values. 
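 * (src_x/src_y were folded into the aligned surface offset above and must both end up 0 for cursors, so the translate keeps the uapi src rect consistent with what we actually program.)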
11390 */ 11391 drm_rect_translate_to(&plane_state->uapi.src, 11392 src_x << 16, src_y << 16); 11393 11394 /* ILK+ do this automagically in hardware */ 11395 if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) { 11396 const struct drm_framebuffer *fb = plane_state->hw.fb; 11397 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 11398 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 11399 11400 offset += (src_h * src_w - 1) * fb->format->cpp[0]; 11401 } 11402 11403 plane_state->color_plane[0].offset = offset; 11404 plane_state->color_plane[0].x = src_x; 11405 plane_state->color_plane[0].y = src_y; 11406 11407 return 0; 11408 } 11409 11410 static int intel_check_cursor(struct intel_crtc_state *crtc_state, 11411 struct intel_plane_state *plane_state) 11412 { 11413 const struct drm_framebuffer *fb = plane_state->hw.fb; 11414 struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 11415 int ret; 11416 11417 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { 11418 drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n"); 11419 return -EINVAL; 11420 } 11421 11422 ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, 11423 &crtc_state->uapi, 11424 DRM_PLANE_HELPER_NO_SCALING, 11425 DRM_PLANE_HELPER_NO_SCALING, 11426 true, true); 11427 if (ret) 11428 return ret; 11429 11430 /* Use the unclipped src/dst rectangles, which we program to hw */ 11431 plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi); 11432 plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi); 11433 11434 ret = intel_cursor_check_surface(plane_state); 11435 if (ret) 11436 return ret; 11437 11438 if (!plane_state->uapi.visible) 11439 return 0; 11440 11441 ret = intel_plane_check_src_coordinates(plane_state); 11442 if (ret) 11443 return ret; 11444 11445 return 0; 11446 } 11447 11448 static unsigned int 11449 i845_cursor_max_stride(struct intel_plane *plane, 11450 u32 pixel_format, u64 modifier, 11451 unsigned int rotation) 11452 { 11453 return 2048; 11454 } 11455 11456 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 11457 { 11458 u32 cntl = 0; 11459 11460 if (crtc_state->gamma_enable) 11461 cntl |= CURSOR_GAMMA_ENABLE; 11462 11463 return cntl; 11464 } 11465 11466 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 11467 const struct intel_plane_state *plane_state) 11468 { 11469 return CURSOR_ENABLE | 11470 CURSOR_FORMAT_ARGB | 11471 CURSOR_STRIDE(plane_state->color_plane[0].stride); 11472 } 11473 11474 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) 11475 { 11476 int width = drm_rect_width(&plane_state->uapi.dst); 11477 11478 /* 11479 * 845g/865g are only limited by the width of their cursors, 11480 * the height is arbitrary up to the precision of the register. 
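 * (In addition, i845_check_cursor() below restricts the FB stride itself to 256/512/1024/2048 bytes, presumably the values CURSOR_STRIDE can be programmed with.)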
11481 */ 11482 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); 11483 } 11484 11485 static int i845_check_cursor(struct intel_crtc_state *crtc_state, 11486 struct intel_plane_state *plane_state) 11487 { 11488 const struct drm_framebuffer *fb = plane_state->hw.fb; 11489 struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 11490 int ret; 11491 11492 ret = intel_check_cursor(crtc_state, plane_state); 11493 if (ret) 11494 return ret; 11495 11496 /* if we want to turn off the cursor ignore width and height */ 11497 if (!fb) 11498 return 0; 11499 11500 /* Check for which cursor types we support */ 11501 if (!i845_cursor_size_ok(plane_state)) { 11502 drm_dbg_kms(&i915->drm, 11503 "Cursor dimension %dx%d not supported\n", 11504 drm_rect_width(&plane_state->uapi.dst), 11505 drm_rect_height(&plane_state->uapi.dst)); 11506 return -EINVAL; 11507 } 11508 11509 drm_WARN_ON(&i915->drm, plane_state->uapi.visible && 11510 plane_state->color_plane[0].stride != fb->pitches[0]); 11511 11512 switch (fb->pitches[0]) { 11513 case 256: 11514 case 512: 11515 case 1024: 11516 case 2048: 11517 break; 11518 default: 11519 drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n", 11520 fb->pitches[0]); 11521 return -EINVAL; 11522 } 11523 11524 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); 11525 11526 return 0; 11527 } 11528 11529 static void i845_update_cursor(struct intel_plane *plane, 11530 const struct intel_crtc_state *crtc_state, 11531 const struct intel_plane_state *plane_state) 11532 { 11533 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11534 u32 cntl = 0, base = 0, pos = 0, size = 0; 11535 unsigned long irqflags; 11536 11537 if (plane_state && plane_state->uapi.visible) { 11538 unsigned int width = drm_rect_width(&plane_state->uapi.dst); 11539 unsigned int height = drm_rect_height(&plane_state->uapi.dst); 11540 11541 cntl = plane_state->ctl | 11542 i845_cursor_ctl_crtc(crtc_state); 11543 11544 size = (height << 12) | width; 11545 11546 base = intel_cursor_base(plane_state); 11547 pos = intel_cursor_position(plane_state); 11548 } 11549 11550 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 11551 11552 /* On these chipsets we can only modify the base/size/stride 11553 * whilst the cursor is disabled. 
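 * (Hence the sequence below: write CURCNTR = 0 to disable, update CURBASE/CURSIZE/CURPOS, then re-enable with the new CURCNTR value; a pure position change takes the CURPOS-only path in the else branch.)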
11554 */ 11555 if (plane->cursor.base != base || 11556 plane->cursor.size != size || 11557 plane->cursor.cntl != cntl) { 11558 intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0); 11559 intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base); 11560 intel_de_write_fw(dev_priv, CURSIZE, size); 11561 intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); 11562 intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl); 11563 11564 plane->cursor.base = base; 11565 plane->cursor.size = size; 11566 plane->cursor.cntl = cntl; 11567 } else { 11568 intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); 11569 } 11570 11571 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 11572 } 11573 11574 static void i845_disable_cursor(struct intel_plane *plane, 11575 const struct intel_crtc_state *crtc_state) 11576 { 11577 i845_update_cursor(plane, crtc_state, NULL); 11578 } 11579 11580 static bool i845_cursor_get_hw_state(struct intel_plane *plane, 11581 enum pipe *pipe) 11582 { 11583 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11584 enum intel_display_power_domain power_domain; 11585 intel_wakeref_t wakeref; 11586 bool ret; 11587 11588 power_domain = POWER_DOMAIN_PIPE(PIPE_A); 11589 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11590 if (!wakeref) 11591 return false; 11592 11593 ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE; 11594 11595 *pipe = PIPE_A; 11596 11597 intel_display_power_put(dev_priv, power_domain, wakeref); 11598 11599 return ret; 11600 } 11601 11602 static unsigned int 11603 i9xx_cursor_max_stride(struct intel_plane *plane, 11604 u32 pixel_format, u64 modifier, 11605 unsigned int rotation) 11606 { 11607 return plane->base.dev->mode_config.cursor_width * 4; 11608 } 11609 11610 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 11611 { 11612 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 11613 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11614 u32 cntl = 0; 11615 11616 if (INTEL_GEN(dev_priv) >= 11) 11617 return cntl; 11618 11619 if (crtc_state->gamma_enable) 11620 cntl = MCURSOR_GAMMA_ENABLE; 11621 11622 if (crtc_state->csc_enable) 11623 cntl |= MCURSOR_PIPE_CSC_ENABLE; 11624 11625 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 11626 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 11627 11628 return cntl; 11629 } 11630 11631 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 11632 const struct intel_plane_state *plane_state) 11633 { 11634 struct drm_i915_private *dev_priv = 11635 to_i915(plane_state->uapi.plane->dev); 11636 u32 cntl = 0; 11637 11638 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 11639 cntl |= MCURSOR_TRICKLE_FEED_DISABLE; 11640 11641 switch (drm_rect_width(&plane_state->uapi.dst)) { 11642 case 64: 11643 cntl |= MCURSOR_MODE_64_ARGB_AX; 11644 break; 11645 case 128: 11646 cntl |= MCURSOR_MODE_128_ARGB_AX; 11647 break; 11648 case 256: 11649 cntl |= MCURSOR_MODE_256_ARGB_AX; 11650 break; 11651 default: 11652 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst)); 11653 return 0; 11654 } 11655 11656 if (plane_state->hw.rotation & DRM_MODE_ROTATE_180) 11657 cntl |= MCURSOR_ROTATE_180; 11658 11659 return cntl; 11660 } 11661 11662 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) 11663 { 11664 struct drm_i915_private *dev_priv = 11665 to_i915(plane_state->uapi.plane->dev); 11666 int width = drm_rect_width(&plane_state->uapi.dst); 11667 int height = drm_rect_height(&plane_state->uapi.dst); 11668 11669 if (!intel_cursor_size_ok(plane_state)) 11670 
return false; 11671 11672 /* Cursor width is limited to a few power-of-two sizes */ 11673 switch (width) { 11674 case 256: 11675 case 128: 11676 case 64: 11677 break; 11678 default: 11679 return false; 11680 } 11681 11682 /* 11683 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor 11684 * height from 8 lines up to the cursor width, when the 11685 * cursor is not rotated. Everything else requires square 11686 * cursors. 11687 */ 11688 if (HAS_CUR_FBC(dev_priv) && 11689 plane_state->hw.rotation & DRM_MODE_ROTATE_0) { 11690 if (height < 8 || height > width) 11691 return false; 11692 } else { 11693 if (height != width) 11694 return false; 11695 } 11696 11697 return true; 11698 } 11699 11700 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, 11701 struct intel_plane_state *plane_state) 11702 { 11703 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 11704 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11705 const struct drm_framebuffer *fb = plane_state->hw.fb; 11706 enum pipe pipe = plane->pipe; 11707 int ret; 11708 11709 ret = intel_check_cursor(crtc_state, plane_state); 11710 if (ret) 11711 return ret; 11712 11713 /* if we want to turn off the cursor ignore width and height */ 11714 if (!fb) 11715 return 0; 11716 11717 /* Check for which cursor types we support */ 11718 if (!i9xx_cursor_size_ok(plane_state)) { 11719 drm_dbg(&dev_priv->drm, 11720 "Cursor dimension %dx%d not supported\n", 11721 drm_rect_width(&plane_state->uapi.dst), 11722 drm_rect_height(&plane_state->uapi.dst)); 11723 return -EINVAL; 11724 } 11725 11726 drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible && 11727 plane_state->color_plane[0].stride != fb->pitches[0]); 11728 11729 if (fb->pitches[0] != 11730 drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) { 11731 drm_dbg_kms(&dev_priv->drm, 11732 "Invalid cursor stride (%u) (cursor width %d)\n", 11733 fb->pitches[0], 11734 drm_rect_width(&plane_state->uapi.dst)); 11735 return -EINVAL; 11736 } 11737 11738 /* 11739 * There's something wrong with the cursor on CHV pipe C. 11740 * If it straddles the left edge of the screen then 11741 * moving it away from the edge or disabling it often 11742 * results in a pipe underrun, and often that can lead to 11743 * dead pipe (constant underrun reported, and it scans 11744 * out just a solid color). To recover from that, the 11745 * display power well must be turned off and on again. 11746 * Refuse to put the cursor into that compromised position.
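 * (The check below therefore rejects only visible cursors with dst.x1 < 0 on CHV pipe C; negative y offsets remain allowed.)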
11747 */ 11748 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && 11749 plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) { 11750 drm_dbg_kms(&dev_priv->drm, 11751 "CHV cursor C not allowed to straddle the left screen edge\n"); 11752 return -EINVAL; 11753 } 11754 11755 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state); 11756 11757 return 0; 11758 } 11759 11760 static void i9xx_update_cursor(struct intel_plane *plane, 11761 const struct intel_crtc_state *crtc_state, 11762 const struct intel_plane_state *plane_state) 11763 { 11764 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11765 enum pipe pipe = plane->pipe; 11766 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; 11767 unsigned long irqflags; 11768 11769 if (plane_state && plane_state->uapi.visible) { 11770 unsigned int width = drm_rect_width(&plane_state->uapi.dst); 11771 unsigned int height = drm_rect_height(&plane_state->uapi.dst); 11772 11773 cntl = plane_state->ctl | 11774 i9xx_cursor_ctl_crtc(crtc_state); 11775 11776 if (width != height) 11777 fbc_ctl = CUR_FBC_CTL_EN | (height - 1); 11778 11779 base = intel_cursor_base(plane_state); 11780 pos = intel_cursor_position(plane_state); 11781 } 11782 11783 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 11784 11785 /* 11786 * On some platforms writing CURCNTR first will also 11787 * cause CURPOS to be armed by the CURBASE write. 11788 * Without the CURCNTR write the CURPOS write would 11789 * arm itself. Thus we always update CURCNTR before 11790 * CURPOS. 11791 * 11792 * On other platforms CURPOS always requires the 11793 * CURBASE write to arm the update. Additionally 11794 * a write to any of the cursor registers will cancel 11795 * an already armed cursor update. Thus leaving out 11796 * the CURBASE write after CURPOS could lead to a 11797 * cursor that doesn't appear to move, or even change 11798 * shape. Thus we always write CURBASE. 11799 * 11800 * The other registers are armed by the CURBASE write 11801 * except when the plane is getting enabled at which time 11802 * the CURCNTR write arms the update. 11803 */ 11804 11805 if (INTEL_GEN(dev_priv) >= 9) 11806 skl_write_cursor_wm(plane, crtc_state); 11807 11808 if (plane->cursor.base != base || 11809 plane->cursor.size != fbc_ctl || 11810 plane->cursor.cntl != cntl) { 11811 if (HAS_CUR_FBC(dev_priv)) 11812 intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe), 11813 fbc_ctl); 11814 intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl); 11815 intel_de_write_fw(dev_priv, CURPOS(pipe), pos); 11816 intel_de_write_fw(dev_priv, CURBASE(pipe), base); 11817 11818 plane->cursor.base = base; 11819 plane->cursor.size = fbc_ctl; 11820 plane->cursor.cntl = cntl; 11821 } else { 11822 intel_de_write_fw(dev_priv, CURPOS(pipe), pos); 11823 intel_de_write_fw(dev_priv, CURBASE(pipe), base); 11824 } 11825 11826 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 11827 } 11828 11829 static void i9xx_disable_cursor(struct intel_plane *plane, 11830 const struct intel_crtc_state *crtc_state) 11831 { 11832 i9xx_update_cursor(plane, crtc_state, NULL); 11833 } 11834 11835 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, 11836 enum pipe *pipe) 11837 { 11838 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11839 enum intel_display_power_domain power_domain; 11840 intel_wakeref_t wakeref; 11841 bool ret; 11842 u32 val; 11843 11844 /* 11845 * Not 100% correct for planes that can move between pipes, 11846 * but that's only the case for gen2-3 which don't have any 11847 * display power wells.
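 * (On gen2-3 the power domain grab below effectively always succeeds, so reading CURCNTR with this pipe's domain is harmless even if the plane was moved.)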
11848 */ 11849 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 11850 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11851 if (!wakeref) 11852 return false; 11853 11854 val = intel_de_read(dev_priv, CURCNTR(plane->pipe)); 11855 11856 ret = val & MCURSOR_MODE; 11857 11858 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 11859 *pipe = plane->pipe; 11860 else 11861 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> 11862 MCURSOR_PIPE_SELECT_SHIFT; 11863 11864 intel_display_power_put(dev_priv, power_domain, wakeref); 11865 11866 return ret; 11867 } 11868 11869 /* VESA 640x480x72Hz mode to set on the pipe */ 11870 static const struct drm_display_mode load_detect_mode = { 11871 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 11872 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 11873 }; 11874 11875 struct drm_framebuffer * 11876 intel_framebuffer_create(struct drm_i915_gem_object *obj, 11877 struct drm_mode_fb_cmd2 *mode_cmd) 11878 { 11879 struct intel_framebuffer *intel_fb; 11880 int ret; 11881 11882 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 11883 if (!intel_fb) 11884 return ERR_PTR(-ENOMEM); 11885 11886 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); 11887 if (ret) 11888 goto err; 11889 11890 return &intel_fb->base; 11891 11892 err: 11893 kfree(intel_fb); 11894 return ERR_PTR(ret); 11895 } 11896 11897 static int intel_modeset_disable_planes(struct drm_atomic_state *state, 11898 struct drm_crtc *crtc) 11899 { 11900 struct drm_plane *plane; 11901 struct drm_plane_state *plane_state; 11902 int ret, i; 11903 11904 ret = drm_atomic_add_affected_planes(state, crtc); 11905 if (ret) 11906 return ret; 11907 11908 for_each_new_plane_in_state(state, plane, plane_state, i) { 11909 if (plane_state->crtc != crtc) 11910 continue; 11911 11912 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 11913 if (ret) 11914 return ret; 11915 11916 drm_atomic_set_fb_for_plane(plane_state, NULL); 11917 } 11918 11919 return 0; 11920 } 11921 11922 int intel_get_load_detect_pipe(struct drm_connector *connector, 11923 struct intel_load_detect_pipe *old, 11924 struct drm_modeset_acquire_ctx *ctx) 11925 { 11926 struct intel_crtc *intel_crtc; 11927 struct intel_encoder *intel_encoder = 11928 intel_attached_encoder(to_intel_connector(connector)); 11929 struct drm_crtc *possible_crtc; 11930 struct drm_encoder *encoder = &intel_encoder->base; 11931 struct drm_crtc *crtc = NULL; 11932 struct drm_device *dev = encoder->dev; 11933 struct drm_i915_private *dev_priv = to_i915(dev); 11934 struct drm_mode_config *config = &dev->mode_config; 11935 struct drm_atomic_state *state = NULL, *restore_state = NULL; 11936 struct drm_connector_state *connector_state; 11937 struct intel_crtc_state *crtc_state; 11938 int ret, i = -1; 11939 11940 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11941 connector->base.id, connector->name, 11942 encoder->base.id, encoder->name); 11943 11944 old->restore_state = NULL; 11945 11946 drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex)); 11947 11948 /* 11949 * Algorithm gets a little messy: 11950 * 11951 * - if the connector already has an assigned crtc, use it (but make 11952 * sure it's on first) 11953 * 11954 * - try to find the first unused crtc that can drive this connector, 11955 * and use that if we find one 11956 */ 11957 11958 /* See if we already have a CRTC for this connector */ 11959 if (connector->state->crtc) { 11960 crtc = connector->state->crtc; 11961 11962 ret = 
drm_modeset_lock(&crtc->mutex, ctx); 11963 if (ret) 11964 goto fail; 11965 11966 /* Make sure the crtc and connector are running */ 11967 goto found; 11968 } 11969 11970 /* Find an unused one (if possible) */ 11971 for_each_crtc(dev, possible_crtc) { 11972 i++; 11973 if (!(encoder->possible_crtcs & (1 << i))) 11974 continue; 11975 11976 ret = drm_modeset_lock(&possible_crtc->mutex, ctx); 11977 if (ret) 11978 goto fail; 11979 11980 if (possible_crtc->state->enable) { 11981 drm_modeset_unlock(&possible_crtc->mutex); 11982 continue; 11983 } 11984 11985 crtc = possible_crtc; 11986 break; 11987 } 11988 11989 /* 11990 * If we didn't find an unused CRTC, don't use any. 11991 */ 11992 if (!crtc) { 11993 drm_dbg_kms(&dev_priv->drm, 11994 "no pipe available for load-detect\n"); 11995 ret = -ENODEV; 11996 goto fail; 11997 } 11998 11999 found: 12000 intel_crtc = to_intel_crtc(crtc); 12001 12002 state = drm_atomic_state_alloc(dev); 12003 restore_state = drm_atomic_state_alloc(dev); 12004 if (!state || !restore_state) { 12005 ret = -ENOMEM; 12006 goto fail; 12007 } 12008 12009 state->acquire_ctx = ctx; 12010 restore_state->acquire_ctx = ctx; 12011 12012 connector_state = drm_atomic_get_connector_state(state, connector); 12013 if (IS_ERR(connector_state)) { 12014 ret = PTR_ERR(connector_state); 12015 goto fail; 12016 } 12017 12018 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); 12019 if (ret) 12020 goto fail; 12021 12022 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 12023 if (IS_ERR(crtc_state)) { 12024 ret = PTR_ERR(crtc_state); 12025 goto fail; 12026 } 12027 12028 crtc_state->uapi.active = true; 12029 12030 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi, 12031 &load_detect_mode); 12032 if (ret) 12033 goto fail; 12034 12035 ret = intel_modeset_disable_planes(state, crtc); 12036 if (ret) 12037 goto fail; 12038 12039 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 12040 if (!ret) 12041 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); 12042 if (!ret) 12043 ret = drm_atomic_add_affected_planes(restore_state, crtc); 12044 if (ret) { 12045 drm_dbg_kms(&dev_priv->drm, 12046 "Failed to create a copy of old state to restore: %i\n", 12047 ret); 12048 goto fail; 12049 } 12050 12051 ret = drm_atomic_commit(state); 12052 if (ret) { 12053 drm_dbg_kms(&dev_priv->drm, 12054 "failed to set mode on load-detect pipe\n"); 12055 goto fail; 12056 } 12057 12058 old->restore_state = restore_state; 12059 drm_atomic_state_put(state); 12060 12061 /* let the connector get through one full cycle before testing */ 12062 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 12063 return true; 12064 12065 fail: 12066 if (state) { 12067 drm_atomic_state_put(state); 12068 state = NULL; 12069 } 12070 if (restore_state) { 12071 drm_atomic_state_put(restore_state); 12072 restore_state = NULL; 12073 } 12074 12075 if (ret == -EDEADLK) 12076 return ret; 12077 12078 return false; 12079 } 12080 12081 void intel_release_load_detect_pipe(struct drm_connector *connector, 12082 struct intel_load_detect_pipe *old, 12083 struct drm_modeset_acquire_ctx *ctx) 12084 { 12085 struct intel_encoder *intel_encoder = 12086 intel_attached_encoder(to_intel_connector(connector)); 12087 struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev); 12088 struct drm_encoder *encoder = &intel_encoder->base; 12089 struct drm_atomic_state *state = old->restore_state; 12090 int ret; 12091 12092 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 12093 
connector->base.id, connector->name, 12094 encoder->base.id, encoder->name); 12095 12096 if (!state) 12097 return; 12098 12099 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 12100 if (ret) 12101 drm_dbg_kms(&i915->drm, 12102 "Couldn't release load detect pipe: %i\n", ret); 12103 drm_atomic_state_put(state); 12104 } 12105 12106 static int i9xx_pll_refclk(struct drm_device *dev, 12107 const struct intel_crtc_state *pipe_config) 12108 { 12109 struct drm_i915_private *dev_priv = to_i915(dev); 12110 u32 dpll = pipe_config->dpll_hw_state.dpll; 12111 12112 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 12113 return dev_priv->vbt.lvds_ssc_freq; 12114 else if (HAS_PCH_SPLIT(dev_priv)) 12115 return 120000; 12116 else if (!IS_GEN(dev_priv, 2)) 12117 return 96000; 12118 else 12119 return 48000; 12120 } 12121 12122 /* Returns the clock of the currently programmed mode of the given pipe. */ 12123 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 12124 struct intel_crtc_state *pipe_config) 12125 { 12126 struct drm_device *dev = crtc->base.dev; 12127 struct drm_i915_private *dev_priv = to_i915(dev); 12128 enum pipe pipe = crtc->pipe; 12129 u32 dpll = pipe_config->dpll_hw_state.dpll; 12130 u32 fp; 12131 struct dpll clock; 12132 int port_clock; 12133 int refclk = i9xx_pll_refclk(dev, pipe_config); 12134 12135 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 12136 fp = pipe_config->dpll_hw_state.fp0; 12137 else 12138 fp = pipe_config->dpll_hw_state.fp1; 12139 12140 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 12141 if (IS_PINEVIEW(dev_priv)) { 12142 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 12143 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 12144 } else { 12145 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 12146 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 12147 } 12148 12149 if (!IS_GEN(dev_priv, 2)) { 12150 if (IS_PINEVIEW(dev_priv)) 12151 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 12152 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 12153 else 12154 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 12155 DPLL_FPA01_P1_POST_DIV_SHIFT); 12156 12157 switch (dpll & DPLL_MODE_MASK) { 12158 case DPLLB_MODE_DAC_SERIAL: 12159 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 12160 5 : 10; 12161 break; 12162 case DPLLB_MODE_LVDS: 12163 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 12164 7 : 14; 12165 break; 12166 default: 12167 drm_dbg_kms(&dev_priv->drm, 12168 "Unknown DPLL mode %08x in programmed " 12169 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 12170 return; 12171 } 12172 12173 if (IS_PINEVIEW(dev_priv)) 12174 port_clock = pnv_calc_dpll_params(refclk, &clock); 12175 else 12176 port_clock = i9xx_calc_dpll_params(refclk, &clock); 12177 } else { 12178 u32 lvds = IS_I830(dev_priv) ? 
0 : intel_de_read(dev_priv, 12179 LVDS); 12180 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 12181 12182 if (is_lvds) { 12183 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 12184 DPLL_FPA01_P1_POST_DIV_SHIFT); 12185 12186 if (lvds & LVDS_CLKB_POWER_UP) 12187 clock.p2 = 7; 12188 else 12189 clock.p2 = 14; 12190 } else { 12191 if (dpll & PLL_P1_DIVIDE_BY_TWO) 12192 clock.p1 = 2; 12193 else { 12194 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 12195 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 12196 } 12197 if (dpll & PLL_P2_DIVIDE_BY_4) 12198 clock.p2 = 4; 12199 else 12200 clock.p2 = 2; 12201 } 12202 12203 port_clock = i9xx_calc_dpll_params(refclk, &clock); 12204 } 12205 12206 /* 12207 * This value includes pixel_multiplier. We will use 12208 * port_clock to compute adjusted_mode.crtc_clock in the 12209 * encoder's get_config() function. 12210 */ 12211 pipe_config->port_clock = port_clock; 12212 } 12213 12214 int intel_dotclock_calculate(int link_freq, 12215 const struct intel_link_m_n *m_n) 12216 { 12217 /* 12218 * The calculation for the data clock is: 12219 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 12220 * But we want to avoid losing precision if possible, so: 12221 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 12222 * 12223 * and with the link M/N values it is simpler: 12224 * pixel_clock = (link_m * link_clock) / link_n 12225 */ 12226 12227 if (!m_n->link_n) 12228 return 0; 12229 12230 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); 12231 } 12232 12233 static void ilk_pch_clock_get(struct intel_crtc *crtc, 12234 struct intel_crtc_state *pipe_config) 12235 { 12236 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12237 12238 /* read out port_clock from the DPLL */ 12239 i9xx_crtc_clock_get(crtc, pipe_config); 12240 12241 /* 12242 * In case there is an active pipe without active ports, 12243 * we may need some idea for the dotclock anyway. 12244 * Calculate one based on the FDI configuration. 12245 */ 12246 pipe_config->hw.adjusted_mode.crtc_clock = 12247 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 12248 &pipe_config->fdi_m_n); 12249 } 12250 12251 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, 12252 struct intel_crtc *crtc) 12253 { 12254 memset(crtc_state, 0, sizeof(*crtc_state)); 12255 12256 __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base); 12257 12258 crtc_state->cpu_transcoder = INVALID_TRANSCODER; 12259 crtc_state->master_transcoder = INVALID_TRANSCODER; 12260 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 12261 crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID; 12262 crtc_state->scaler_state.scaler_id = -1; 12263 crtc_state->mst_master_transcoder = INVALID_TRANSCODER; 12264 } 12265 12266 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc) 12267 { 12268 struct intel_crtc_state *crtc_state; 12269 12270 crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL); 12271 12272 if (crtc_state) 12273 intel_crtc_state_reset(crtc_state, crtc); 12274 12275 return crtc_state; 12276 } 12277 12278 /* Returns the currently programmed mode of the given encoder.
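 * The readout below chains encoder->get_hw_state() to find the pipe, display.get_pipe_config() to fill a temporary crtc state, encoder->get_config() for the encoder specifics, and finally intel_mode_from_pipe_config() to build the mode.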
*/ 12279 struct drm_display_mode * 12280 intel_encoder_current_mode(struct intel_encoder *encoder) 12281 { 12282 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 12283 struct intel_crtc_state *crtc_state; 12284 struct drm_display_mode *mode; 12285 struct intel_crtc *crtc; 12286 enum pipe pipe; 12287 12288 if (!encoder->get_hw_state(encoder, &pipe)) 12289 return NULL; 12290 12291 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 12292 12293 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 12294 if (!mode) 12295 return NULL; 12296 12297 crtc_state = intel_crtc_state_alloc(crtc); 12298 if (!crtc_state) { 12299 kfree(mode); 12300 return NULL; 12301 } 12302 12303 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) { 12304 kfree(crtc_state); 12305 kfree(mode); 12306 return NULL; 12307 } 12308 12309 encoder->get_config(encoder, crtc_state); 12310 12311 intel_mode_from_pipe_config(mode, crtc_state); 12312 12313 kfree(crtc_state); 12314 12315 return mode; 12316 } 12317 12318 static void intel_crtc_destroy(struct drm_crtc *crtc) 12319 { 12320 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12321 12322 drm_crtc_cleanup(crtc); 12323 kfree(intel_crtc); 12324 } 12325 12326 /** 12327 * intel_wm_need_update - Check whether watermarks need updating 12328 * @cur: current plane state 12329 * @new: new plane state 12330 * 12331 * Check current plane state versus the new one to determine whether 12332 * watermarks need to be recalculated. 12333 * 12334 * Returns true or false. 12335 */ 12336 static bool intel_wm_need_update(const struct intel_plane_state *cur, 12337 struct intel_plane_state *new) 12338 { 12339 /* Update watermarks on tiling or size changes. */ 12340 if (new->uapi.visible != cur->uapi.visible) 12341 return true; 12342 12343 if (!cur->hw.fb || !new->hw.fb) 12344 return false; 12345 12346 if (cur->hw.fb->modifier != new->hw.fb->modifier || 12347 cur->hw.rotation != new->hw.rotation || 12348 drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) || 12349 drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) || 12350 drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) || 12351 drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst)) 12352 return true; 12353 12354 return false; 12355 } 12356 12357 static bool needs_scaling(const struct intel_plane_state *state) 12358 { 12359 int src_w = drm_rect_width(&state->uapi.src) >> 16; 12360 int src_h = drm_rect_height(&state->uapi.src) >> 16; 12361 int dst_w = drm_rect_width(&state->uapi.dst); 12362 int dst_h = drm_rect_height(&state->uapi.dst); 12363 12364 return (src_w != dst_w || src_h != dst_h); 12365 } 12366 12367 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, 12368 struct intel_crtc_state *crtc_state, 12369 const struct intel_plane_state *old_plane_state, 12370 struct intel_plane_state *plane_state) 12371 { 12372 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 12373 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 12374 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12375 bool mode_changed = needs_modeset(crtc_state); 12376 bool was_crtc_enabled = old_crtc_state->hw.active; 12377 bool is_crtc_enabled = crtc_state->hw.active; 12378 bool turn_off, turn_on, visible, was_visible; 12379 int ret; 12380 12381 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { 12382 ret = skl_update_scaler_plane(crtc_state, plane_state); 12383 if (ret) 12384 return ret; 12385 } 12386 12387 was_visible = 
old_plane_state->uapi.visible; 12388 visible = plane_state->uapi.visible; 12389 12390 if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible)) 12391 was_visible = false; 12392 12393 /* 12394 * Visibility is calculated as if the crtc was on, but 12395 * after scaler setup everything depends on it being off 12396 * when the crtc isn't active. 12397 * 12398 * FIXME this is wrong for watermarks. Watermarks should also 12399 * be computed as if the pipe would be active. Perhaps move 12400 * per-plane wm computation to the .check_plane() hook, and 12401 * only combine the results from all planes in the current place? 12402 */ 12403 if (!is_crtc_enabled) { 12404 intel_plane_set_invisible(crtc_state, plane_state); 12405 visible = false; 12406 } 12407 12408 if (!was_visible && !visible) 12409 return 0; 12410 12411 turn_off = was_visible && (!visible || mode_changed); 12412 turn_on = visible && (!was_visible || mode_changed); 12413 12414 drm_dbg_atomic(&dev_priv->drm, 12415 "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", 12416 crtc->base.base.id, crtc->base.name, 12417 plane->base.base.id, plane->base.name, 12418 was_visible, visible, 12419 turn_off, turn_on, mode_changed); 12420 12421 if (turn_on) { 12422 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 12423 crtc_state->update_wm_pre = true; 12424 12425 /* must disable cxsr around plane enable/disable */ 12426 if (plane->id != PLANE_CURSOR) 12427 crtc_state->disable_cxsr = true; 12428 } else if (turn_off) { 12429 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 12430 crtc_state->update_wm_post = true; 12431 12432 /* must disable cxsr around plane enable/disable */ 12433 if (plane->id != PLANE_CURSOR) 12434 crtc_state->disable_cxsr = true; 12435 } else if (intel_wm_need_update(old_plane_state, plane_state)) { 12436 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { 12437 /* FIXME bollocks */ 12438 crtc_state->update_wm_pre = true; 12439 crtc_state->update_wm_post = true; 12440 } 12441 } 12442 12443 if (visible || was_visible) 12444 crtc_state->fb_bits |= plane->frontbuffer_bit; 12445 12446 /* 12447 * ILK/SNB DVSACNTR/Sprite Enable 12448 * IVB SPR_CTL/Sprite Enable 12449 * "When in Self Refresh Big FIFO mode, a write to enable the 12450 * plane will be internally buffered and delayed while Big FIFO 12451 * mode is exiting." 12452 * 12453 * Which means that enabling the sprite can take an extra frame 12454 * when we start in big FIFO mode (LP1+). Thus we need to drop 12455 * down to LP0 and wait for vblank in order to make sure the 12456 * sprite gets enabled on the next vblank after the register write. 12457 * Doing otherwise would risk enabling the sprite one frame after 12458 * we've already signalled flip completion. We can resume LP1+ 12459 * once the sprite has been enabled. 12460 * 12461 * 12462 * WaCxSRDisabledForSpriteScaling:ivb 12463 * IVB SPR_SCALE/Scaling Enable 12464 * "Low Power watermarks must be disabled for at least one 12465 * frame before enabling sprite scaling, and kept disabled 12466 * until sprite scaling is disabled." 12467 * 12468 * ILK/SNB DVSASCALE/Scaling Enable 12469 * "When in Self Refresh Big FIFO mode, scaling enable will be 12470 * masked off while Big FIFO mode is exiting." 12471 * 12472 * Despite the w/a only being listed for IVB we assume that 12473 * the ILK/SNB note has similar ramifications, hence we apply 12474 * the w/a on all three platforms. 12475 * 12476 * Experimental results suggest this is needed for the primary 12477 * plane as well, not only the sprite plane.
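 * (disable_lp_wm below is what forces the drop to LP0 watermarks around the plane/scaler enable.)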
12478 */ 12479 if (plane->id != PLANE_CURSOR && 12480 (IS_GEN_RANGE(dev_priv, 5, 6) || 12481 IS_IVYBRIDGE(dev_priv)) && 12482 (turn_on || (!needs_scaling(old_plane_state) && 12483 needs_scaling(plane_state)))) 12484 crtc_state->disable_lp_wm = true; 12485 12486 return 0; 12487 } 12488 12489 static bool encoders_cloneable(const struct intel_encoder *a, 12490 const struct intel_encoder *b) 12491 { 12492 /* masks could be asymmetric, so check both ways */ 12493 return a == b || (a->cloneable & (1 << b->type) && 12494 b->cloneable & (1 << a->type)); 12495 } 12496 12497 static bool check_single_encoder_cloning(struct drm_atomic_state *state, 12498 struct intel_crtc *crtc, 12499 struct intel_encoder *encoder) 12500 { 12501 struct intel_encoder *source_encoder; 12502 struct drm_connector *connector; 12503 struct drm_connector_state *connector_state; 12504 int i; 12505 12506 for_each_new_connector_in_state(state, connector, connector_state, i) { 12507 if (connector_state->crtc != &crtc->base) 12508 continue; 12509 12510 source_encoder = 12511 to_intel_encoder(connector_state->best_encoder); 12512 if (!encoders_cloneable(encoder, source_encoder)) 12513 return false; 12514 } 12515 12516 return true; 12517 } 12518 12519 static int icl_add_linked_planes(struct intel_atomic_state *state) 12520 { 12521 struct intel_plane *plane, *linked; 12522 struct intel_plane_state *plane_state, *linked_plane_state; 12523 int i; 12524 12525 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12526 linked = plane_state->planar_linked_plane; 12527 12528 if (!linked) 12529 continue; 12530 12531 linked_plane_state = intel_atomic_get_plane_state(state, linked); 12532 if (IS_ERR(linked_plane_state)) 12533 return PTR_ERR(linked_plane_state); 12534 12535 drm_WARN_ON(state->base.dev, 12536 linked_plane_state->planar_linked_plane != plane); 12537 drm_WARN_ON(state->base.dev, 12538 linked_plane_state->planar_slave == plane_state->planar_slave); 12539 } 12540 12541 return 0; 12542 } 12543 12544 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) 12545 { 12546 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 12547 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12548 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); 12549 struct intel_plane *plane, *linked; 12550 struct intel_plane_state *plane_state; 12551 int i; 12552 12553 if (INTEL_GEN(dev_priv) < 11) 12554 return 0; 12555 12556 /* 12557 * Destroy all old plane links and make the slave plane invisible 12558 * in the crtc_state->active_planes mask. 
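 * Planes that still need a Y plane get re-linked in the second loop below, which picks the first NV12-capable Y plane on this pipe that is not already in use.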
12559 */ 12560 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12561 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) 12562 continue; 12563 12564 plane_state->planar_linked_plane = NULL; 12565 if (plane_state->planar_slave && !plane_state->uapi.visible) { 12566 crtc_state->active_planes &= ~BIT(plane->id); 12567 crtc_state->update_planes |= BIT(plane->id); 12568 } 12569 12570 plane_state->planar_slave = false; 12571 } 12572 12573 if (!crtc_state->nv12_planes) 12574 return 0; 12575 12576 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12577 struct intel_plane_state *linked_state = NULL; 12578 12579 if (plane->pipe != crtc->pipe || 12580 !(crtc_state->nv12_planes & BIT(plane->id))) 12581 continue; 12582 12583 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 12584 if (!icl_is_nv12_y_plane(dev_priv, linked->id)) 12585 continue; 12586 12587 if (crtc_state->active_planes & BIT(linked->id)) 12588 continue; 12589 12590 linked_state = intel_atomic_get_plane_state(state, linked); 12591 if (IS_ERR(linked_state)) 12592 return PTR_ERR(linked_state); 12593 12594 break; 12595 } 12596 12597 if (!linked_state) { 12598 drm_dbg_kms(&dev_priv->drm, 12599 "Need %d free Y planes for planar YUV\n", 12600 hweight8(crtc_state->nv12_planes)); 12601 12602 return -EINVAL; 12603 } 12604 12605 plane_state->planar_linked_plane = linked; 12606 12607 linked_state->planar_slave = true; 12608 linked_state->planar_linked_plane = plane; 12609 crtc_state->active_planes |= BIT(linked->id); 12610 crtc_state->update_planes |= BIT(linked->id); 12611 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n", 12612 linked->base.name, plane->base.name); 12613 12614 /* Copy parameters to slave plane */ 12615 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; 12616 linked_state->color_ctl = plane_state->color_ctl; 12617 linked_state->view = plane_state->view; 12618 memcpy(linked_state->color_plane, plane_state->color_plane, 12619 sizeof(linked_state->color_plane)); 12620 12621 intel_plane_copy_uapi_to_hw_state(linked_state, plane_state); 12622 linked_state->uapi.src = plane_state->uapi.src; 12623 linked_state->uapi.dst = plane_state->uapi.dst; 12624 12625 if (icl_is_hdr_plane(dev_priv, plane->id)) { 12626 if (linked->id == PLANE_SPRITE5) 12627 plane_state->cus_ctl |= PLANE_CUS_PLANE_7; 12628 else if (linked->id == PLANE_SPRITE4) 12629 plane_state->cus_ctl |= PLANE_CUS_PLANE_6; 12630 else if (linked->id == PLANE_SPRITE3) 12631 plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL; 12632 else if (linked->id == PLANE_SPRITE2) 12633 plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL; 12634 else 12635 MISSING_CASE(linked->id); 12636 } 12637 } 12638 12639 return 0; 12640 } 12641 12642 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) 12643 { 12644 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 12645 struct intel_atomic_state *state = 12646 to_intel_atomic_state(new_crtc_state->uapi.state); 12647 const struct intel_crtc_state *old_crtc_state = 12648 intel_atomic_get_old_crtc_state(state, crtc); 12649 12650 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; 12651 } 12652 12653 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) 12654 { 12655 const struct drm_display_mode *adjusted_mode = 12656 &crtc_state->hw.adjusted_mode; 12657 int linetime_wm; 12658 12659 if (!crtc_state->hw.enable) 12660 return 0; 12661 12662 linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 12663 
adjusted_mode->crtc_clock); 12664 12665 return min(linetime_wm, 0x1ff); 12666 } 12667 12668 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, 12669 const struct intel_cdclk_state *cdclk_state) 12670 { 12671 const struct drm_display_mode *adjusted_mode = 12672 &crtc_state->hw.adjusted_mode; 12673 int linetime_wm; 12674 12675 if (!crtc_state->hw.enable) 12676 return 0; 12677 12678 linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 12679 cdclk_state->logical.cdclk); 12680 12681 return min(linetime_wm, 0x1ff); 12682 } 12683 12684 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) 12685 { 12686 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 12687 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12688 const struct drm_display_mode *adjusted_mode = 12689 &crtc_state->hw.adjusted_mode; 12690 int linetime_wm; 12691 12692 if (!crtc_state->hw.enable) 12693 return 0; 12694 12695 linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8, 12696 crtc_state->pixel_rate); 12697 12698 /* Display WA #1135: BXT:ALL GLK:ALL */ 12699 if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled) 12700 linetime_wm /= 2; 12701 12702 return min(linetime_wm, 0x1ff); 12703 } 12704 12705 static int hsw_compute_linetime_wm(struct intel_atomic_state *state, 12706 struct intel_crtc *crtc) 12707 { 12708 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12709 struct intel_crtc_state *crtc_state = 12710 intel_atomic_get_new_crtc_state(state, crtc); 12711 const struct intel_cdclk_state *cdclk_state; 12712 12713 if (INTEL_GEN(dev_priv) >= 9) 12714 crtc_state->linetime = skl_linetime_wm(crtc_state); 12715 else 12716 crtc_state->linetime = hsw_linetime_wm(crtc_state); 12717 12718 if (!hsw_crtc_supports_ips(crtc)) 12719 return 0; 12720 12721 cdclk_state = intel_atomic_get_cdclk_state(state); 12722 if (IS_ERR(cdclk_state)) 12723 return PTR_ERR(cdclk_state); 12724 12725 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state, 12726 cdclk_state); 12727 12728 return 0; 12729 } 12730 12731 static int intel_crtc_atomic_check(struct intel_atomic_state *state, 12732 struct intel_crtc *crtc) 12733 { 12734 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12735 struct intel_crtc_state *crtc_state = 12736 intel_atomic_get_new_crtc_state(state, crtc); 12737 bool mode_changed = needs_modeset(crtc_state); 12738 int ret; 12739 12740 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && 12741 mode_changed && !crtc_state->hw.active) 12742 crtc_state->update_wm_post = true; 12743 12744 if (mode_changed && crtc_state->hw.enable && 12745 dev_priv->display.crtc_compute_clock && 12746 !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) { 12747 ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state); 12748 if (ret) 12749 return ret; 12750 } 12751 12752 /* 12753 * May need to update pipe gamma enable bits 12754 * when C8 planes are getting enabled/disabled. 
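 * C8 pixels are looked up through the legacy LUT, so a change in the
 * set of C8 planes can flip the pipe gamma enable requirement;
 * flagging color_mgmt_changed makes intel_color_check() rerun below.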
12755 */ 12756 if (c8_planes_changed(crtc_state)) 12757 crtc_state->uapi.color_mgmt_changed = true; 12758 12759 if (mode_changed || crtc_state->update_pipe || 12760 crtc_state->uapi.color_mgmt_changed) { 12761 ret = intel_color_check(crtc_state); 12762 if (ret) 12763 return ret; 12764 } 12765 12766 if (dev_priv->display.compute_pipe_wm) { 12767 ret = dev_priv->display.compute_pipe_wm(crtc_state); 12768 if (ret) { 12769 drm_dbg_kms(&dev_priv->drm, 12770 "Target pipe watermarks are invalid\n"); 12771 return ret; 12772 } 12773 } 12774 12775 if (dev_priv->display.compute_intermediate_wm) { 12776 if (drm_WARN_ON(&dev_priv->drm, 12777 !dev_priv->display.compute_pipe_wm)) 12778 return 0; 12779 12780 /* 12781 * Calculate 'intermediate' watermarks that satisfy both the 12782 * old state and the new state. We can program these 12783 * immediately. 12784 */ 12785 ret = dev_priv->display.compute_intermediate_wm(crtc_state); 12786 if (ret) { 12787 drm_dbg_kms(&dev_priv->drm, 12788 "No valid intermediate pipe watermarks are possible\n"); 12789 return ret; 12790 } 12791 } 12792 12793 if (INTEL_GEN(dev_priv) >= 9) { 12794 if (mode_changed || crtc_state->update_pipe) { 12795 ret = skl_update_scaler_crtc(crtc_state); 12796 if (ret) 12797 return ret; 12798 } 12799 12800 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state); 12801 if (ret) 12802 return ret; 12803 } 12804 12805 if (HAS_IPS(dev_priv)) { 12806 ret = hsw_compute_ips_config(crtc_state); 12807 if (ret) 12808 return ret; 12809 } 12810 12811 if (INTEL_GEN(dev_priv) >= 9 || 12812 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 12813 ret = hsw_compute_linetime_wm(state, crtc); 12814 if (ret) 12815 return ret; 12816 12817 } 12818 12819 if (!mode_changed) 12820 intel_psr2_sel_fetch_update(state, crtc); 12821 12822 return 0; 12823 } 12824 12825 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 12826 { 12827 struct intel_connector *connector; 12828 struct drm_connector_list_iter conn_iter; 12829 12830 drm_connector_list_iter_begin(dev, &conn_iter); 12831 for_each_intel_connector_iter(connector, &conn_iter) { 12832 if (connector->base.state->crtc) 12833 drm_connector_put(&connector->base); 12834 12835 if (connector->base.encoder) { 12836 connector->base.state->best_encoder = 12837 connector->base.encoder; 12838 connector->base.state->crtc = 12839 connector->base.encoder->crtc; 12840 12841 drm_connector_get(&connector->base); 12842 } else { 12843 connector->base.state->best_encoder = NULL; 12844 connector->base.state->crtc = NULL; 12845 } 12846 } 12847 drm_connector_list_iter_end(&conn_iter); 12848 } 12849 12850 static int 12851 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 12852 struct intel_crtc_state *pipe_config) 12853 { 12854 struct drm_connector *connector = conn_state->connector; 12855 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); 12856 const struct drm_display_info *info = &connector->display_info; 12857 int bpp; 12858 12859 switch (conn_state->max_bpc) { 12860 case 6 ... 7: 12861 bpp = 6 * 3; 12862 break; 12863 case 8 ... 9: 12864 bpp = 8 * 3; 12865 break; 12866 case 10 ... 
11: 12867 bpp = 10 * 3; 12868 break; 12869 case 12: 12870 bpp = 12 * 3; 12871 break; 12872 default: 12873 return -EINVAL; 12874 } 12875 12876 if (bpp < pipe_config->pipe_bpp) { 12877 drm_dbg_kms(&i915->drm, 12878 "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of " 12879 "EDID bpp %d, requested bpp %d, max platform bpp %d\n", 12880 connector->base.id, connector->name, 12881 bpp, 3 * info->bpc, 12882 3 * conn_state->max_requested_bpc, 12883 pipe_config->pipe_bpp); 12884 12885 pipe_config->pipe_bpp = bpp; 12886 } 12887 12888 return 0; 12889 } 12890 12891 static int 12892 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 12893 struct intel_crtc_state *pipe_config) 12894 { 12895 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12896 struct drm_atomic_state *state = pipe_config->uapi.state; 12897 struct drm_connector *connector; 12898 struct drm_connector_state *connector_state; 12899 int bpp, i; 12900 12901 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 12902 IS_CHERRYVIEW(dev_priv))) 12903 bpp = 10*3; 12904 else if (INTEL_GEN(dev_priv) >= 5) 12905 bpp = 12*3; 12906 else 12907 bpp = 8*3; 12908 12909 pipe_config->pipe_bpp = bpp; 12910 12911 /* Clamp display bpp to connector max bpp */ 12912 for_each_new_connector_in_state(state, connector, connector_state, i) { 12913 int ret; 12914 12915 if (connector_state->crtc != &crtc->base) 12916 continue; 12917 12918 ret = compute_sink_pipe_bpp(connector_state, pipe_config); 12919 if (ret) 12920 return ret; 12921 } 12922 12923 return 0; 12924 } 12925 12926 static void intel_dump_crtc_timings(struct drm_i915_private *i915, 12927 const struct drm_display_mode *mode) 12928 { 12929 drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, " 12930 "type: 0x%x flags: 0x%x\n", 12931 mode->crtc_clock, 12932 mode->crtc_hdisplay, mode->crtc_hsync_start, 12933 mode->crtc_hsync_end, mode->crtc_htotal, 12934 mode->crtc_vdisplay, mode->crtc_vsync_start, 12935 mode->crtc_vsync_end, mode->crtc_vtotal, 12936 mode->type, mode->flags); 12937 } 12938 12939 static void 12940 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, 12941 const char *id, unsigned int lane_count, 12942 const struct intel_link_m_n *m_n) 12943 { 12944 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); 12945 12946 drm_dbg_kms(&i915->drm, 12947 "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 12948 id, lane_count, 12949 m_n->gmch_m, m_n->gmch_n, 12950 m_n->link_m, m_n->link_n, m_n->tu); 12951 } 12952 12953 static void 12954 intel_dump_infoframe(struct drm_i915_private *dev_priv, 12955 const union hdmi_infoframe *frame) 12956 { 12957 if (!drm_debug_enabled(DRM_UT_KMS)) 12958 return; 12959 12960 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame); 12961 } 12962 12963 static void 12964 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv, 12965 const struct drm_dp_vsc_sdp *vsc) 12966 { 12967 if (!drm_debug_enabled(DRM_UT_KMS)) 12968 return; 12969 12970 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc); 12971 } 12972 12973 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x 12974 12975 static const char * const output_type_str[] = { 12976 OUTPUT_TYPE(UNUSED), 12977 OUTPUT_TYPE(ANALOG), 12978 OUTPUT_TYPE(DVO), 12979 OUTPUT_TYPE(SDVO), 12980 OUTPUT_TYPE(LVDS), 12981 OUTPUT_TYPE(TVOUT), 12982 OUTPUT_TYPE(HDMI), 12983 OUTPUT_TYPE(DP), 12984 OUTPUT_TYPE(EDP), 12985 OUTPUT_TYPE(DSI), 12986 OUTPUT_TYPE(DDI), 12987 OUTPUT_TYPE(DP_MST), 12988 }; 12989 12990 #undef OUTPUT_TYPE 12991 12992 static void 
snprintf_output_types(char *buf, size_t len, 12993 unsigned int output_types) 12994 { 12995 char *str = buf; 12996 int i; 12997 12998 str[0] = '\0'; 12999 13000 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) { 13001 int r; 13002 13003 if ((output_types & BIT(i)) == 0) 13004 continue; 13005 13006 r = snprintf(str, len, "%s%s", 13007 str != buf ? "," : "", output_type_str[i]); 13008 if (r >= len) 13009 break; 13010 str += r; 13011 len -= r; 13012 13013 output_types &= ~BIT(i); 13014 } 13015 13016 WARN_ON_ONCE(output_types != 0); 13017 } 13018 13019 static const char * const output_format_str[] = { 13020 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid", 13021 [INTEL_OUTPUT_FORMAT_RGB] = "RGB", 13022 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", 13023 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", 13024 }; 13025 13026 static const char *output_formats(enum intel_output_format format) 13027 { 13028 if (format >= ARRAY_SIZE(output_format_str)) 13029 format = INTEL_OUTPUT_FORMAT_INVALID; 13030 return output_format_str[format]; 13031 } 13032 13033 static void intel_dump_plane_state(const struct intel_plane_state *plane_state) 13034 { 13035 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 13036 struct drm_i915_private *i915 = to_i915(plane->base.dev); 13037 const struct drm_framebuffer *fb = plane_state->hw.fb; 13038 struct drm_format_name_buf format_name; 13039 13040 if (!fb) { 13041 drm_dbg_kms(&i915->drm, 13042 "[PLANE:%d:%s] fb: [NOFB], visible: %s\n", 13043 plane->base.base.id, plane->base.name, 13044 yesno(plane_state->uapi.visible)); 13045 return; 13046 } 13047 13048 drm_dbg_kms(&i915->drm, 13049 "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n", 13050 plane->base.base.id, plane->base.name, 13051 fb->base.id, fb->width, fb->height, 13052 drm_get_format_name(fb->format->format, &format_name), 13053 yesno(plane_state->uapi.visible)); 13054 drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n", 13055 plane_state->hw.rotation, plane_state->scaler_id); 13056 if (plane_state->uapi.visible) 13057 drm_dbg_kms(&i915->drm, 13058 "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", 13059 DRM_RECT_FP_ARG(&plane_state->uapi.src), 13060 DRM_RECT_ARG(&plane_state->uapi.dst)); 13061 } 13062 13063 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, 13064 struct intel_atomic_state *state, 13065 const char *context) 13066 { 13067 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 13068 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13069 const struct intel_plane_state *plane_state; 13070 struct intel_plane *plane; 13071 char buf[64]; 13072 int i; 13073 13074 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n", 13075 crtc->base.base.id, crtc->base.name, 13076 yesno(pipe_config->hw.enable), context); 13077 13078 if (!pipe_config->hw.enable) 13079 goto dump_planes; 13080 13081 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); 13082 drm_dbg_kms(&dev_priv->drm, 13083 "active: %s, output_types: %s (0x%x), output format: %s\n", 13084 yesno(pipe_config->hw.active), 13085 buf, pipe_config->output_types, 13086 output_formats(pipe_config->output_format)); 13087 13088 drm_dbg_kms(&dev_priv->drm, 13089 "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", 13090 transcoder_name(pipe_config->cpu_transcoder), 13091 pipe_config->pipe_bpp, pipe_config->dither); 13092 13093 drm_dbg_kms(&dev_priv->drm, 13094 "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n", 13095 
transcoder_name(pipe_config->master_transcoder), 13096 pipe_config->sync_mode_slaves_mask); 13097 13098 if (pipe_config->has_pch_encoder) 13099 intel_dump_m_n_config(pipe_config, "fdi", 13100 pipe_config->fdi_lanes, 13101 &pipe_config->fdi_m_n); 13102 13103 if (intel_crtc_has_dp_encoder(pipe_config)) { 13104 intel_dump_m_n_config(pipe_config, "dp m_n", 13105 pipe_config->lane_count, &pipe_config->dp_m_n); 13106 if (pipe_config->has_drrs) 13107 intel_dump_m_n_config(pipe_config, "dp m2_n2", 13108 pipe_config->lane_count, 13109 &pipe_config->dp_m2_n2); 13110 } 13111 13112 drm_dbg_kms(&dev_priv->drm, 13113 "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", 13114 pipe_config->has_audio, pipe_config->has_infoframe, 13115 pipe_config->infoframes.enable); 13116 13117 if (pipe_config->infoframes.enable & 13118 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) 13119 drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n", 13120 pipe_config->infoframes.gcp); 13121 if (pipe_config->infoframes.enable & 13122 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) 13123 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi); 13124 if (pipe_config->infoframes.enable & 13125 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD)) 13126 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd); 13127 if (pipe_config->infoframes.enable & 13128 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) 13129 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi); 13130 if (pipe_config->infoframes.enable & 13131 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM)) 13132 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm); 13133 if (pipe_config->infoframes.enable & 13134 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA)) 13135 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm); 13136 if (pipe_config->infoframes.enable & 13137 intel_hdmi_infoframe_enable(DP_SDP_VSC)) 13138 intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc); 13139 13140 drm_dbg_kms(&dev_priv->drm, "requested mode:\n"); 13141 drm_mode_debug_printmodeline(&pipe_config->hw.mode); 13142 drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n"); 13143 drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode); 13144 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode); 13145 drm_dbg_kms(&dev_priv->drm, 13146 "port clock: %d, pipe src size: %dx%d, pixel rate %d\n", 13147 pipe_config->port_clock, 13148 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 13149 pipe_config->pixel_rate); 13150 13151 drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n", 13152 pipe_config->linetime, pipe_config->ips_linetime); 13153 13154 if (INTEL_GEN(dev_priv) >= 9) 13155 drm_dbg_kms(&dev_priv->drm, 13156 "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", 13157 crtc->num_scalers, 13158 pipe_config->scaler_state.scaler_users, 13159 pipe_config->scaler_state.scaler_id); 13160 13161 if (HAS_GMCH(dev_priv)) 13162 drm_dbg_kms(&dev_priv->drm, 13163 "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 13164 pipe_config->gmch_pfit.control, 13165 pipe_config->gmch_pfit.pgm_ratios, 13166 pipe_config->gmch_pfit.lvds_border_bits); 13167 else 13168 drm_dbg_kms(&dev_priv->drm, 13169 "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n", 13170 DRM_RECT_ARG(&pipe_config->pch_pfit.dst), 13171 enableddisabled(pipe_config->pch_pfit.enabled), 13172 yesno(pipe_config->pch_pfit.force_thru)); 13173 13174 drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n", 13175 pipe_config->ips_enabled, 
pipe_config->double_wide); 13176 13177 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); 13178 13179 if (IS_CHERRYVIEW(dev_priv)) 13180 drm_dbg_kms(&dev_priv->drm, 13181 "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 13182 pipe_config->cgm_mode, pipe_config->gamma_mode, 13183 pipe_config->gamma_enable, pipe_config->csc_enable); 13184 else 13185 drm_dbg_kms(&dev_priv->drm, 13186 "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 13187 pipe_config->csc_mode, pipe_config->gamma_mode, 13188 pipe_config->gamma_enable, pipe_config->csc_enable); 13189 13190 drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n", 13191 transcoder_name(pipe_config->mst_master_transcoder)); 13192 13193 dump_planes: 13194 if (!state) 13195 return; 13196 13197 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 13198 if (plane->pipe == crtc->pipe) 13199 intel_dump_plane_state(plane_state); 13200 } 13201 } 13202 13203 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 13204 { 13205 struct drm_device *dev = state->base.dev; 13206 struct drm_connector *connector; 13207 struct drm_connector_list_iter conn_iter; 13208 unsigned int used_ports = 0; 13209 unsigned int used_mst_ports = 0; 13210 bool ret = true; 13211 13212 /* 13213 * We're going to peek into connector->state, 13214 * hence connection_mutex must be held. 13215 */ 13216 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); 13217 13218 /* 13219 * Walk the connector list instead of the encoder 13220 * list to detect the problem on ddi platforms 13221 * where there's just one encoder per digital port. 13222 */ 13223 drm_connector_list_iter_begin(dev, &conn_iter); 13224 drm_for_each_connector_iter(connector, &conn_iter) { 13225 struct drm_connector_state *connector_state; 13226 struct intel_encoder *encoder; 13227 13228 connector_state = 13229 drm_atomic_get_new_connector_state(&state->base, 13230 connector); 13231 if (!connector_state) 13232 connector_state = connector->state; 13233 13234 if (!connector_state->best_encoder) 13235 continue; 13236 13237 encoder = to_intel_encoder(connector_state->best_encoder); 13238 13239 drm_WARN_ON(dev, !connector_state->crtc); 13240 13241 switch (encoder->type) { 13242 case INTEL_OUTPUT_DDI: 13243 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) 13244 break; 13245 fallthrough; 13246 case INTEL_OUTPUT_DP: 13247 case INTEL_OUTPUT_HDMI: 13248 case INTEL_OUTPUT_EDP: 13249 /* the same port mustn't appear more than once */ 13250 if (used_ports & BIT(encoder->port)) 13251 ret = false; 13252 13253 used_ports |= BIT(encoder->port); 13254 break; 13255 case INTEL_OUTPUT_DP_MST: 13256 used_mst_ports |= 13257 1 << encoder->port; 13258 break; 13259 default: 13260 break; 13261 } 13262 } 13263 drm_connector_list_iter_end(&conn_iter); 13264 13265 /* can't mix MST and SST/HDMI on the same port */ 13266 if (used_ports & used_mst_ports) 13267 return false; 13268 13269 return ret; 13270 } 13271 13272 static void 13273 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state) 13274 { 13275 intel_crtc_copy_color_blobs(crtc_state); 13276 } 13277 13278 static void 13279 intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state) 13280 { 13281 crtc_state->hw.enable = crtc_state->uapi.enable; 13282 crtc_state->hw.active = crtc_state->uapi.active; 13283 crtc_state->hw.mode = crtc_state->uapi.mode; 13284 crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode; 13285 
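	/* color blobs (degamma/gamma/ctm) are handled by the nomodeset helper */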
intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state); 13286 } 13287 13288 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state) 13289 { 13290 crtc_state->uapi.enable = crtc_state->hw.enable; 13291 crtc_state->uapi.active = crtc_state->hw.active; 13292 drm_WARN_ON(crtc_state->uapi.crtc->dev, 13293 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0); 13294 13295 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode; 13296 13297 /* copy color blobs to uapi */ 13298 drm_property_replace_blob(&crtc_state->uapi.degamma_lut, 13299 crtc_state->hw.degamma_lut); 13300 drm_property_replace_blob(&crtc_state->uapi.gamma_lut, 13301 crtc_state->hw.gamma_lut); 13302 drm_property_replace_blob(&crtc_state->uapi.ctm, 13303 crtc_state->hw.ctm); 13304 } 13305 13306 static int 13307 intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state) 13308 { 13309 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 13310 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13311 struct intel_crtc_state *saved_state; 13312 13313 saved_state = intel_crtc_state_alloc(crtc); 13314 if (!saved_state) 13315 return -ENOMEM; 13316 13317 /* free the old crtc_state->hw members */ 13318 intel_crtc_free_hw_state(crtc_state); 13319 13320 /* FIXME: before the switch to atomic started, a new pipe_config was 13321 * kzalloc'd. Code that depends on any field being zero should be 13322 * fixed, so that the crtc_state can be safely duplicated. For now, 13323 * only fields that are known to not cause problems are preserved. */ 13324 13325 saved_state->uapi = crtc_state->uapi; 13326 saved_state->scaler_state = crtc_state->scaler_state; 13327 saved_state->shared_dpll = crtc_state->shared_dpll; 13328 saved_state->dpll_hw_state = crtc_state->dpll_hw_state; 13329 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, 13330 sizeof(saved_state->icl_port_dplls)); 13331 saved_state->crc_enabled = crtc_state->crc_enabled; 13332 if (IS_G4X(dev_priv) || 13333 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13334 saved_state->wm = crtc_state->wm; 13335 13336 memcpy(crtc_state, saved_state, sizeof(*crtc_state)); 13337 kfree(saved_state); 13338 13339 intel_crtc_copy_uapi_to_hw_state(crtc_state); 13340 13341 return 0; 13342 } 13343 13344 static int 13345 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) 13346 { 13347 struct drm_crtc *crtc = pipe_config->uapi.crtc; 13348 struct drm_atomic_state *state = pipe_config->uapi.state; 13349 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); 13350 struct drm_connector *connector; 13351 struct drm_connector_state *connector_state; 13352 int base_bpp, ret, i; 13353 bool retry = true; 13354 13355 pipe_config->cpu_transcoder = 13356 (enum transcoder) to_intel_crtc(crtc)->pipe; 13357 13358 /* 13359 * Sanitize sync polarity flags based on requested ones. If neither 13360 * positive nor negative polarity is requested, treat this as meaning 13361 * negative polarity.
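 * E.g. a mode with no sync flags set at all ends up with
 * DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC.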
13362 */ 13363 if (!(pipe_config->hw.adjusted_mode.flags & 13364 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 13365 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 13366 13367 if (!(pipe_config->hw.adjusted_mode.flags & 13368 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 13369 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 13370 13371 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc), 13372 pipe_config); 13373 if (ret) 13374 return ret; 13375 13376 base_bpp = pipe_config->pipe_bpp; 13377 13378 /* 13379 * Determine the real pipe dimensions. Note that stereo modes can 13380 * increase the actual pipe size due to the frame doubling and 13381 * insertion of additional space for blanks between the frames. This 13382 * is stored in the crtc timings. We use the requested mode to do this 13383 * computation to clearly distinguish it from the adjusted mode, which 13384 * can be changed by the connectors in the below retry loop. 13385 */ 13386 drm_mode_get_hv_timing(&pipe_config->hw.mode, 13387 &pipe_config->pipe_src_w, 13388 &pipe_config->pipe_src_h); 13389 13390 for_each_new_connector_in_state(state, connector, connector_state, i) { 13391 struct intel_encoder *encoder = 13392 to_intel_encoder(connector_state->best_encoder); 13393 13394 if (connector_state->crtc != crtc) 13395 continue; 13396 13397 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { 13398 drm_dbg_kms(&i915->drm, 13399 "rejecting invalid cloning configuration\n"); 13400 return -EINVAL; 13401 } 13402 13403 /* 13404 * Determine output_types before calling the .compute_config() 13405 * hooks so that the hooks can use this information safely. 13406 */ 13407 if (encoder->compute_output_type) 13408 pipe_config->output_types |= 13409 BIT(encoder->compute_output_type(encoder, pipe_config, 13410 connector_state)); 13411 else 13412 pipe_config->output_types |= BIT(encoder->type); 13413 } 13414 13415 encoder_retry: 13416 /* Ensure the port clock defaults are reset when retrying. */ 13417 pipe_config->port_clock = 0; 13418 pipe_config->pixel_multiplier = 1; 13419 13420 /* Fill in default crtc timings, allow encoders to overwrite them. */ 13421 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode, 13422 CRTC_STEREO_DOUBLE); 13423 13424 /* Pass our mode to the connectors and the CRTC to give them a chance to 13425 * adjust it according to limitations or connector properties, and also 13426 * a chance to reject the mode entirely. 13427 */ 13428 for_each_new_connector_in_state(state, connector, connector_state, i) { 13429 struct intel_encoder *encoder = 13430 to_intel_encoder(connector_state->best_encoder); 13431 13432 if (connector_state->crtc != crtc) 13433 continue; 13434 13435 ret = encoder->compute_config(encoder, pipe_config, 13436 connector_state); 13437 if (ret < 0) { 13438 if (ret != -EDEADLK) 13439 drm_dbg_kms(&i915->drm, 13440 "Encoder config failure: %d\n", 13441 ret); 13442 return ret; 13443 } 13444 } 13445 13446 /* Set default port clock if not overwritten by the encoder. Needs to be 13447 * done afterwards in case the encoder adjusts the mode.
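 * E.g. SDVO may run the port at a multiple of the pixel clock for
 * low dotclock modes; the encoder then bumps pixel_multiplier and
 * the default below scales accordingly.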
*/ 13448 if (!pipe_config->port_clock) 13449 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock 13450 * pipe_config->pixel_multiplier; 13451 13452 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 13453 if (ret == -EDEADLK) 13454 return ret; 13455 if (ret < 0) { 13456 drm_dbg_kms(&i915->drm, "CRTC fixup failed\n"); 13457 return ret; 13458 } 13459 13460 if (ret == RETRY) { 13461 if (drm_WARN(&i915->drm, !retry, 13462 "loop in pipe configuration computation\n")) 13463 return -EINVAL; 13464 13465 drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n"); 13466 retry = false; 13467 goto encoder_retry; 13468 } 13469 13470 /* Dithering seems to not pass through bits correctly when it should, so 13471 * only enable it on 6bpc panels and when it's not a compliance 13472 * test requesting 6bpc video pattern. 13473 */ 13474 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) && 13475 !pipe_config->dither_force_disable; 13476 drm_dbg_kms(&i915->drm, 13477 "hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 13478 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 13479 13480 return 0; 13481 } 13482 13483 static int 13484 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state) 13485 { 13486 struct intel_atomic_state *state = 13487 to_intel_atomic_state(crtc_state->uapi.state); 13488 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 13489 struct drm_connector_state *conn_state; 13490 struct drm_connector *connector; 13491 int i; 13492 13493 for_each_new_connector_in_state(&state->base, connector, 13494 conn_state, i) { 13495 struct intel_encoder *encoder = 13496 to_intel_encoder(conn_state->best_encoder); 13497 int ret; 13498 13499 if (conn_state->crtc != &crtc->base || 13500 !encoder->compute_config_late) 13501 continue; 13502 13503 ret = encoder->compute_config_late(encoder, crtc_state, 13504 conn_state); 13505 if (ret) 13506 return ret; 13507 } 13508 13509 return 0; 13510 } 13511 13512 bool intel_fuzzy_clock_check(int clock1, int clock2) 13513 { 13514 int diff; 13515 13516 if (clock1 == clock2) 13517 return true; 13518 13519 if (!clock1 || !clock2) 13520 return false; 13521 13522 diff = abs(clock1 - clock2); 13523 13524 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 13525 return true; 13526 13527 return false; 13528 } 13529 13530 static bool 13531 intel_compare_m_n(unsigned int m, unsigned int n, 13532 unsigned int m2, unsigned int n2, 13533 bool exact) 13534 { 13535 if (m == m2 && n == n2) 13536 return true; 13537 13538 if (exact || !m || !n || !m2 || !n2) 13539 return false; 13540 13541 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 13542 13543 if (n > n2) { 13544 while (n > n2) { 13545 m2 <<= 1; 13546 n2 <<= 1; 13547 } 13548 } else if (n < n2) { 13549 while (n < n2) { 13550 m <<= 1; 13551 n <<= 1; 13552 } 13553 } 13554 13555 if (n != n2) 13556 return false; 13557 13558 return intel_fuzzy_clock_check(m, m2); 13559 } 13560 13561 static bool 13562 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 13563 const struct intel_link_m_n *m2_n2, 13564 bool exact) 13565 { 13566 return m_n->tu == m2_n2->tu && 13567 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, 13568 m2_n2->gmch_m, m2_n2->gmch_n, exact) && 13569 intel_compare_m_n(m_n->link_m, m_n->link_n, 13570 m2_n2->link_m, m2_n2->link_n, exact); 13571 } 13572 13573 static bool 13574 intel_compare_infoframe(const union hdmi_infoframe *a, 13575 const union hdmi_infoframe *b) 13576 { 13577 return memcmp(a, b, sizeof(*a)) == 0; 13578 } 13579 13580 static bool 13581
intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a, 13582 const struct drm_dp_vsc_sdp *b) 13583 { 13584 return memcmp(a, b, sizeof(*a)) == 0; 13585 } 13586 13587 static void 13588 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, 13589 bool fastset, const char *name, 13590 const union hdmi_infoframe *a, 13591 const union hdmi_infoframe *b) 13592 { 13593 if (fastset) { 13594 if (!drm_debug_enabled(DRM_UT_KMS)) 13595 return; 13596 13597 drm_dbg_kms(&dev_priv->drm, 13598 "fastset mismatch in %s infoframe\n", name); 13599 drm_dbg_kms(&dev_priv->drm, "expected:\n"); 13600 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); 13601 drm_dbg_kms(&dev_priv->drm, "found:\n"); 13602 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); 13603 } else { 13604 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name); 13605 drm_err(&dev_priv->drm, "expected:\n"); 13606 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); 13607 drm_err(&dev_priv->drm, "found:\n"); 13608 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); 13609 } 13610 } 13611 13612 static void 13613 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv, 13614 bool fastset, const char *name, 13615 const struct drm_dp_vsc_sdp *a, 13616 const struct drm_dp_vsc_sdp *b) 13617 { 13618 if (fastset) { 13619 if (!drm_debug_enabled(DRM_UT_KMS)) 13620 return; 13621 13622 drm_dbg_kms(&dev_priv->drm, 13623 "fastset mismatch in %s dp sdp\n", name); 13624 drm_dbg_kms(&dev_priv->drm, "expected:\n"); 13625 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a); 13626 drm_dbg_kms(&dev_priv->drm, "found:\n"); 13627 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b); 13628 } else { 13629 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name); 13630 drm_err(&dev_priv->drm, "expected:\n"); 13631 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a); 13632 drm_err(&dev_priv->drm, "found:\n"); 13633 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b); 13634 } 13635 } 13636 13637 static void __printf(4, 5) 13638 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, 13639 const char *name, const char *format, ...) 
13640 { 13641 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 13642 struct va_format vaf; 13643 va_list args; 13644 13645 va_start(args, format); 13646 vaf.fmt = format; 13647 vaf.va = &args; 13648 13649 if (fastset) 13650 drm_dbg_kms(&i915->drm, 13651 "[CRTC:%d:%s] fastset mismatch in %s %pV\n", 13652 crtc->base.base.id, crtc->base.name, name, &vaf); 13653 else 13654 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n", 13655 crtc->base.base.id, crtc->base.name, name, &vaf); 13656 13657 va_end(args); 13658 } 13659 13660 static bool fastboot_enabled(struct drm_i915_private *dev_priv) 13661 { 13662 if (dev_priv->params.fastboot != -1) 13663 return dev_priv->params.fastboot; 13664 13665 /* Enable fastboot by default on Skylake and newer */ 13666 if (INTEL_GEN(dev_priv) >= 9) 13667 return true; 13668 13669 /* Enable fastboot by default on VLV and CHV */ 13670 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13671 return true; 13672 13673 /* Disabled by default on all others */ 13674 return false; 13675 } 13676 13677 static bool 13678 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 13679 const struct intel_crtc_state *pipe_config, 13680 bool fastset) 13681 { 13682 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); 13683 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 13684 bool ret = true; 13685 u32 bp_gamma = 0; 13686 bool fixup_inherited = fastset && 13687 current_config->inherited && !pipe_config->inherited; 13688 13689 if (fixup_inherited && !fastboot_enabled(dev_priv)) { 13690 drm_dbg_kms(&dev_priv->drm, 13691 "initial modeset and fastboot not set\n"); 13692 ret = false; 13693 } 13694 13695 #define PIPE_CONF_CHECK_X(name) do { \ 13696 if (current_config->name != pipe_config->name) { \ 13697 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13698 "(expected 0x%08x, found 0x%08x)", \ 13699 current_config->name, \ 13700 pipe_config->name); \ 13701 ret = false; \ 13702 } \ 13703 } while (0) 13704 13705 #define PIPE_CONF_CHECK_I(name) do { \ 13706 if (current_config->name != pipe_config->name) { \ 13707 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13708 "(expected %i, found %i)", \ 13709 current_config->name, \ 13710 pipe_config->name); \ 13711 ret = false; \ 13712 } \ 13713 } while (0) 13714 13715 #define PIPE_CONF_CHECK_BOOL(name) do { \ 13716 if (current_config->name != pipe_config->name) { \ 13717 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13718 "(expected %s, found %s)", \ 13719 yesno(current_config->name), \ 13720 yesno(pipe_config->name)); \ 13721 ret = false; \ 13722 } \ 13723 } while (0) 13724 13725 /* 13726 * Checks state where we only read out the enabling, but not the entire 13727 * state itself (like full infoframes or ELD for audio). These states 13728 * require a full modeset on bootup to fix up. 
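 * has_audio is the canonical example: we can read out whether audio
 * is enabled, but not the ELD contents, so an inherited state is
 * only trusted when both sides agree it is disabled.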
13729 */ 13730 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \ 13731 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \ 13732 PIPE_CONF_CHECK_BOOL(name); \ 13733 } else { \ 13734 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13735 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \ 13736 yesno(current_config->name), \ 13737 yesno(pipe_config->name)); \ 13738 ret = false; \ 13739 } \ 13740 } while (0) 13741 13742 #define PIPE_CONF_CHECK_P(name) do { \ 13743 if (current_config->name != pipe_config->name) { \ 13744 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13745 "(expected %p, found %p)", \ 13746 current_config->name, \ 13747 pipe_config->name); \ 13748 ret = false; \ 13749 } \ 13750 } while (0) 13751 13752 #define PIPE_CONF_CHECK_M_N(name) do { \ 13753 if (!intel_compare_link_m_n(&current_config->name, \ 13754 &pipe_config->name,\ 13755 !fastset)) { \ 13756 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13757 "(expected tu %i gmch %i/%i link %i/%i, " \ 13758 "found tu %i, gmch %i/%i link %i/%i)", \ 13759 current_config->name.tu, \ 13760 current_config->name.gmch_m, \ 13761 current_config->name.gmch_n, \ 13762 current_config->name.link_m, \ 13763 current_config->name.link_n, \ 13764 pipe_config->name.tu, \ 13765 pipe_config->name.gmch_m, \ 13766 pipe_config->name.gmch_n, \ 13767 pipe_config->name.link_m, \ 13768 pipe_config->name.link_n); \ 13769 ret = false; \ 13770 } \ 13771 } while (0) 13772 13773 /* This is required for BDW+ where there is only one set of registers for 13774 * switching between high and low RR. 13775 * This macro can be used whenever a comparison has to be made between one 13776 * hw state and multiple sw state variables. 13777 */ 13778 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \ 13779 if (!intel_compare_link_m_n(&current_config->name, \ 13780 &pipe_config->name, !fastset) && \ 13781 !intel_compare_link_m_n(&current_config->alt_name, \ 13782 &pipe_config->name, !fastset)) { \ 13783 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13784 "(expected tu %i gmch %i/%i link %i/%i, " \ 13785 "or tu %i gmch %i/%i link %i/%i, " \ 13786 "found tu %i, gmch %i/%i link %i/%i)", \ 13787 current_config->name.tu, \ 13788 current_config->name.gmch_m, \ 13789 current_config->name.gmch_n, \ 13790 current_config->name.link_m, \ 13791 current_config->name.link_n, \ 13792 current_config->alt_name.tu, \ 13793 current_config->alt_name.gmch_m, \ 13794 current_config->alt_name.gmch_n, \ 13795 current_config->alt_name.link_m, \ 13796 current_config->alt_name.link_n, \ 13797 pipe_config->name.tu, \ 13798 pipe_config->name.gmch_m, \ 13799 pipe_config->name.gmch_n, \ 13800 pipe_config->name.link_m, \ 13801 pipe_config->name.link_n); \ 13802 ret = false; \ 13803 } \ 13804 } while (0) 13805 13806 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ 13807 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 13808 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13809 "(%x) (expected %i, found %i)", \ 13810 (mask), \ 13811 current_config->name & (mask), \ 13812 pipe_config->name & (mask)); \ 13813 ret = false; \ 13814 } \ 13815 } while (0) 13816 13817 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \ 13818 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 13819 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13820 "(expected %i, found %i)", \ 13821 current_config->name, \ 13822 pipe_config->name); \ 13823 ret = false; \ 13824 } \ 13825 } while (0) 13826
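/*
 * All of the PIPE_CONF_CHECK_* macros here log the mismatch and set
 * ret = false instead of bailing out early, so a single compare pass
 * reports every mismatching field at once.
 */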
13827 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ 13828 if (!intel_compare_infoframe(&current_config->infoframes.name, \ 13829 &pipe_config->infoframes.name)) { \ 13830 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \ 13831 &current_config->infoframes.name, \ 13832 &pipe_config->infoframes.name); \ 13833 ret = false; \ 13834 } \ 13835 } while (0) 13836 13837 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \ 13838 if (!current_config->has_psr && !pipe_config->has_psr && \ 13839 !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \ 13840 &pipe_config->infoframes.name)) { \ 13841 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \ 13842 &current_config->infoframes.name, \ 13843 &pipe_config->infoframes.name); \ 13844 ret = false; \ 13845 } \ 13846 } while (0) 13847 13848 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \ 13849 if (current_config->name1 != pipe_config->name1) { \ 13850 pipe_config_mismatch(fastset, crtc, __stringify(name1), \ 13851 "(expected %i, found %i, won't compare lut values)", \ 13852 current_config->name1, \ 13853 pipe_config->name1); \ 13854 ret = false;\ 13855 } else { \ 13856 if (!intel_color_lut_equal(current_config->name2, \ 13857 pipe_config->name2, pipe_config->name1, \ 13858 bit_precision)) { \ 13859 pipe_config_mismatch(fastset, crtc, __stringify(name2), \ 13860 "hw_state doesn't match sw_state"); \ 13861 ret = false; \ 13862 } \ 13863 } \ 13864 } while (0) 13865 13866 #define PIPE_CONF_QUIRK(quirk) \ 13867 ((current_config->quirks | pipe_config->quirks) & (quirk)) 13868 13869 PIPE_CONF_CHECK_I(cpu_transcoder); 13870 13871 PIPE_CONF_CHECK_BOOL(has_pch_encoder); 13872 PIPE_CONF_CHECK_I(fdi_lanes); 13873 PIPE_CONF_CHECK_M_N(fdi_m_n); 13874 13875 PIPE_CONF_CHECK_I(lane_count); 13876 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 13877 13878 if (INTEL_GEN(dev_priv) < 8) { 13879 PIPE_CONF_CHECK_M_N(dp_m_n); 13880 13881 if (current_config->has_drrs) 13882 PIPE_CONF_CHECK_M_N(dp_m2_n2); 13883 } else 13884 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 13885 13886 PIPE_CONF_CHECK_X(output_types); 13887 13888 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); 13889 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); 13890 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); 13891 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); 13892 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); 13893 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); 13894 13895 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); 13896 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); 13897 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); 13898 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); 13899 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); 13900 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); 13901 13902 PIPE_CONF_CHECK_I(pixel_multiplier); 13903 PIPE_CONF_CHECK_I(output_format); 13904 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 13905 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 13906 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13907 PIPE_CONF_CHECK_BOOL(limited_color_range); 13908 13909 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 13910 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 13911 PIPE_CONF_CHECK_BOOL(has_infoframe); 13912 PIPE_CONF_CHECK_BOOL(fec_enable); 13913 13914 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); 13915 13916 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13917 DRM_MODE_FLAG_INTERLACE); 13918 13919 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 13920
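		/* sync flag readout can't be trusted when this quirk is set */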
PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13921 DRM_MODE_FLAG_PHSYNC); 13922 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13923 DRM_MODE_FLAG_NHSYNC); 13924 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13925 DRM_MODE_FLAG_PVSYNC); 13926 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13927 DRM_MODE_FLAG_NVSYNC); 13928 } 13929 13930 PIPE_CONF_CHECK_X(gmch_pfit.control); 13931 /* pfit ratios are autocomputed by the hw on gen4+ */ 13932 if (INTEL_GEN(dev_priv) < 4) 13933 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 13934 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 13935 13936 /* 13937 * Changing the EDP transcoder input mux 13938 * (A_ONOFF vs. A_ON) requires a full modeset. 13939 */ 13940 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 13941 13942 if (!fastset) { 13943 PIPE_CONF_CHECK_I(pipe_src_w); 13944 PIPE_CONF_CHECK_I(pipe_src_h); 13945 13946 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 13947 if (current_config->pch_pfit.enabled) { 13948 PIPE_CONF_CHECK_I(pch_pfit.dst.x1); 13949 PIPE_CONF_CHECK_I(pch_pfit.dst.y1); 13950 PIPE_CONF_CHECK_I(pch_pfit.dst.x2); 13951 PIPE_CONF_CHECK_I(pch_pfit.dst.y2); 13952 } 13953 13954 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 13955 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 13956 13957 PIPE_CONF_CHECK_X(gamma_mode); 13958 if (IS_CHERRYVIEW(dev_priv)) 13959 PIPE_CONF_CHECK_X(cgm_mode); 13960 else 13961 PIPE_CONF_CHECK_X(csc_mode); 13962 PIPE_CONF_CHECK_BOOL(gamma_enable); 13963 PIPE_CONF_CHECK_BOOL(csc_enable); 13964 13965 PIPE_CONF_CHECK_I(linetime); 13966 PIPE_CONF_CHECK_I(ips_linetime); 13967 13968 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config); 13969 if (bp_gamma) 13970 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma); 13971 } 13972 13973 PIPE_CONF_CHECK_BOOL(double_wide); 13974 13975 PIPE_CONF_CHECK_P(shared_dpll); 13976 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 13977 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 13978 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 13979 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 13980 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 13981 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 13982 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 13983 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 13984 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 13985 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 13986 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 13987 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 13988 PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 13989 PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 13990 PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 13991 PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 13992 PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 13993 PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 13994 PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 13995 PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 13996 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 13997 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); 13998 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); 13999 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); 14000 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); 14001 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); 14002 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); 14003 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); 14004 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); 14005 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); 14006 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 14007 14008 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 14009 PIPE_CONF_CHECK_X(dsi_pll.div); 14010 14011 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) 14012 PIPE_CONF_CHECK_I(pipe_bpp); 14013 14014 
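	/*
	 * Clocks are compared fuzzily, since the sw state may have been
	 * rounded differently than what was read back from the hw.
	 */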
PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); 14015 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 14016 14017 PIPE_CONF_CHECK_I(min_voltage_level); 14018 14019 PIPE_CONF_CHECK_X(infoframes.enable); 14020 PIPE_CONF_CHECK_X(infoframes.gcp); 14021 PIPE_CONF_CHECK_INFOFRAME(avi); 14022 PIPE_CONF_CHECK_INFOFRAME(spd); 14023 PIPE_CONF_CHECK_INFOFRAME(hdmi); 14024 PIPE_CONF_CHECK_INFOFRAME(drm); 14025 PIPE_CONF_CHECK_DP_VSC_SDP(vsc); 14026 14027 PIPE_CONF_CHECK_X(sync_mode_slaves_mask); 14028 PIPE_CONF_CHECK_I(master_transcoder); 14029 14030 PIPE_CONF_CHECK_I(dsc.compression_enable); 14031 PIPE_CONF_CHECK_I(dsc.dsc_split); 14032 PIPE_CONF_CHECK_I(dsc.compressed_bpp); 14033 14034 PIPE_CONF_CHECK_I(mst_master_transcoder); 14035 14036 #undef PIPE_CONF_CHECK_X 14037 #undef PIPE_CONF_CHECK_I 14038 #undef PIPE_CONF_CHECK_BOOL 14039 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 14040 #undef PIPE_CONF_CHECK_P 14041 #undef PIPE_CONF_CHECK_FLAGS 14042 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 14043 #undef PIPE_CONF_CHECK_COLOR_LUT 14044 #undef PIPE_CONF_QUIRK 14045 14046 return ret; 14047 } 14048 14049 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 14050 const struct intel_crtc_state *pipe_config) 14051 { 14052 if (pipe_config->has_pch_encoder) { 14053 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 14054 &pipe_config->fdi_m_n); 14055 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock; 14056 14057 /* 14058 * FDI already provided one idea for the dotclock. 14059 * Yell if the encoder disagrees. 14060 */ 14061 drm_WARN(&dev_priv->drm, 14062 !intel_fuzzy_clock_check(fdi_dotclock, dotclock), 14063 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 14064 fdi_dotclock, dotclock); 14065 } 14066 } 14067 14068 static void verify_wm_state(struct intel_crtc *crtc, 14069 struct intel_crtc_state *new_crtc_state) 14070 { 14071 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14072 struct skl_hw_state { 14073 struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; 14074 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; 14075 struct skl_pipe_wm wm; 14076 } *hw; 14077 struct skl_pipe_wm *sw_wm; 14078 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; 14079 u8 hw_enabled_slices; 14080 const enum pipe pipe = crtc->pipe; 14081 int plane, level, max_level = ilk_wm_max_level(dev_priv); 14082 14083 if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active) 14084 return; 14085 14086 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 14087 if (!hw) 14088 return; 14089 14090 skl_pipe_wm_get_hw_state(crtc, &hw->wm); 14091 sw_wm = &new_crtc_state->wm.skl.optimal; 14092 14093 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv); 14094 14095 hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv); 14096 14097 if (INTEL_GEN(dev_priv) >= 11 && 14098 hw_enabled_slices != dev_priv->dbuf.enabled_slices) 14099 drm_err(&dev_priv->drm, 14100 "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n", 14101 dev_priv->dbuf.enabled_slices, 14102 hw_enabled_slices); 14103 14104 /* planes */ 14105 for_each_universal_plane(dev_priv, pipe, plane) { 14106 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 14107 14108 hw_plane_wm = &hw->wm.planes[plane]; 14109 sw_plane_wm = &sw_wm->planes[plane]; 14110 14111 /* Watermarks */ 14112 for (level = 0; level <= max_level; level++) { 14113 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 14114 &sw_plane_wm->wm[level]) || 14115 (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level], 14116 &sw_plane_wm->sagv_wm0))) 14117 continue; 14118 
14119 drm_err(&dev_priv->drm, 14120 "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 14121 pipe_name(pipe), plane + 1, level, 14122 sw_plane_wm->wm[level].plane_en, 14123 sw_plane_wm->wm[level].plane_res_b, 14124 sw_plane_wm->wm[level].plane_res_l, 14125 hw_plane_wm->wm[level].plane_en, 14126 hw_plane_wm->wm[level].plane_res_b, 14127 hw_plane_wm->wm[level].plane_res_l); 14128 } 14129 14130 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 14131 &sw_plane_wm->trans_wm)) { 14132 drm_err(&dev_priv->drm, 14133 "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 14134 pipe_name(pipe), plane + 1, 14135 sw_plane_wm->trans_wm.plane_en, 14136 sw_plane_wm->trans_wm.plane_res_b, 14137 sw_plane_wm->trans_wm.plane_res_l, 14138 hw_plane_wm->trans_wm.plane_en, 14139 hw_plane_wm->trans_wm.plane_res_b, 14140 hw_plane_wm->trans_wm.plane_res_l); 14141 } 14142 14143 /* DDB */ 14144 hw_ddb_entry = &hw->ddb_y[plane]; 14145 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane]; 14146 14147 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 14148 drm_err(&dev_priv->drm, 14149 "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", 14150 pipe_name(pipe), plane + 1, 14151 sw_ddb_entry->start, sw_ddb_entry->end, 14152 hw_ddb_entry->start, hw_ddb_entry->end); 14153 } 14154 } 14155 14156 /* 14157 * cursor 14158 * If the cursor plane isn't active, we may not have updated its ddb 14159 * allocation. In that case, since the ddb allocation will be updated 14160 * once the plane becomes visible, we can skip this check. 14161 */ 14162 if (1) { 14163 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 14164 14165 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR]; 14166 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; 14167 14168 /* Watermarks */ 14169 for (level = 0; level <= max_level; level++) { 14170 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 14171 &sw_plane_wm->wm[level]) || 14172 (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level], 14173 &sw_plane_wm->sagv_wm0))) 14174 continue; 14175 14176 drm_err(&dev_priv->drm, 14177 "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 14178 pipe_name(pipe), level, 14179 sw_plane_wm->wm[level].plane_en, 14180 sw_plane_wm->wm[level].plane_res_b, 14181 sw_plane_wm->wm[level].plane_res_l, 14182 hw_plane_wm->wm[level].plane_en, 14183 hw_plane_wm->wm[level].plane_res_b, 14184 hw_plane_wm->wm[level].plane_res_l); 14185 } 14186 14187 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 14188 &sw_plane_wm->trans_wm)) { 14189 drm_err(&dev_priv->drm, 14190 "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 14191 pipe_name(pipe), 14192 sw_plane_wm->trans_wm.plane_en, 14193 sw_plane_wm->trans_wm.plane_res_b, 14194 sw_plane_wm->trans_wm.plane_res_l, 14195 hw_plane_wm->trans_wm.plane_en, 14196 hw_plane_wm->trans_wm.plane_res_b, 14197 hw_plane_wm->trans_wm.plane_res_l); 14198 } 14199 14200 /* DDB */ 14201 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR]; 14202 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; 14203 14204 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 14205 drm_err(&dev_priv->drm, 14206 "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", 14207 pipe_name(pipe), 14208 sw_ddb_entry->start, sw_ddb_entry->end, 14209 hw_ddb_entry->start, hw_ddb_entry->end); 14210 } 14211 } 14212 14213 kfree(hw); 14214 } 14215 14216 static void 14217 verify_connector_state(struct
intel_atomic_state *state, 14218 struct intel_crtc *crtc) 14219 { 14220 struct drm_connector *connector; 14221 struct drm_connector_state *new_conn_state; 14222 int i; 14223 14224 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { 14225 struct drm_encoder *encoder = connector->encoder; 14226 struct intel_crtc_state *crtc_state = NULL; 14227 14228 if (new_conn_state->crtc != &crtc->base) 14229 continue; 14230 14231 if (crtc) 14232 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 14233 14234 intel_connector_verify_state(crtc_state, new_conn_state); 14235 14236 I915_STATE_WARN(new_conn_state->best_encoder != encoder, 14237 "connector's atomic encoder doesn't match legacy encoder\n"); 14238 } 14239 } 14240 14241 static void 14242 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) 14243 { 14244 struct intel_encoder *encoder; 14245 struct drm_connector *connector; 14246 struct drm_connector_state *old_conn_state, *new_conn_state; 14247 int i; 14248 14249 for_each_intel_encoder(&dev_priv->drm, encoder) { 14250 bool enabled = false, found = false; 14251 enum pipe pipe; 14252 14253 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n", 14254 encoder->base.base.id, 14255 encoder->base.name); 14256 14257 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, 14258 new_conn_state, i) { 14259 if (old_conn_state->best_encoder == &encoder->base) 14260 found = true; 14261 14262 if (new_conn_state->best_encoder != &encoder->base) 14263 continue; 14264 found = enabled = true; 14265 14266 I915_STATE_WARN(new_conn_state->crtc != 14267 encoder->base.crtc, 14268 "connector's crtc doesn't match encoder crtc\n"); 14269 } 14270 14271 if (!found) 14272 continue; 14273 14274 I915_STATE_WARN(!!encoder->base.crtc != enabled, 14275 "encoder's enabled state mismatch " 14276 "(expected %i, found %i)\n", 14277 !!encoder->base.crtc, enabled); 14278 14279 if (!encoder->base.crtc) { 14280 bool active; 14281 14282 active = encoder->get_hw_state(encoder, &pipe); 14283 I915_STATE_WARN(active, 14284 "encoder detached but still enabled on pipe %c.\n", 14285 pipe_name(pipe)); 14286 } 14287 } 14288 } 14289 14290 static void 14291 verify_crtc_state(struct intel_crtc *crtc, 14292 struct intel_crtc_state *old_crtc_state, 14293 struct intel_crtc_state *new_crtc_state) 14294 { 14295 struct drm_device *dev = crtc->base.dev; 14296 struct drm_i915_private *dev_priv = to_i915(dev); 14297 struct intel_encoder *encoder; 14298 struct intel_crtc_state *pipe_config = old_crtc_state; 14299 struct drm_atomic_state *state = old_crtc_state->uapi.state; 14300 14301 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); 14302 intel_crtc_free_hw_state(old_crtc_state); 14303 intel_crtc_state_reset(old_crtc_state, crtc); 14304 old_crtc_state->uapi.state = state; 14305 14306 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, 14307 crtc->base.name); 14308 14309 pipe_config->hw.enable = new_crtc_state->hw.enable; 14310 14311 pipe_config->hw.active = 14312 dev_priv->display.get_pipe_config(crtc, pipe_config); 14313 14314 /* we keep both pipes enabled on 830 */ 14315 if (IS_I830(dev_priv) && pipe_config->hw.active) 14316 pipe_config->hw.active = new_crtc_state->hw.active; 14317 14318 I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active, 14319 "crtc active state doesn't match with hw state " 14320 "(expected %i, found %i)\n", 14321 new_crtc_state->hw.active, pipe_config->hw.active); 14322 14323 I915_STATE_WARN(crtc->active != 

static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;

	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	pipe_config->hw.active =
		dev_priv->display.get_pipe_config(crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match hw state (expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state (expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}

static void
intel_verify_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane,
					  plane_state, i)
		assert_plane(plane, plane_state->planar_slave ||
			     plane_state->uapi.visible);
}

static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
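
/*
 * For reference (core DRM semantics): drm_crtc_mask() is simply
 * BIT(drm_crtc_index(crtc)), so the PLL checks above reduce to
 * single-bit tests, e.g. "pll->active_mask & crtc_mask" asks whether
 * this one crtc is an active user of the PLL.
 */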

static void
verify_shared_dpll_state(struct intel_crtc *crtc,
			 struct intel_crtc_state *old_crtc_state,
			 struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask)\n",
				pipe_name(crtc->pipe));
		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
				"pll enabled crtcs mismatch (found pipe %c in enabled mask)\n",
				pipe_name(crtc->pipe));
	}
}

static void
intel_modeset_verify_crtc(struct intel_crtc *crtc,
			  struct intel_atomic_state *state,
			  struct intel_crtc_state *old_crtc_state,
			  struct intel_crtc_state *new_crtc_state)
{
	if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
		return;

	verify_wm_state(crtc, new_crtc_state);
	verify_connector_state(state, crtc);
	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
}

static void
verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
	int i;

	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
		verify_single_dpll_state(dev_priv,
					 &dev_priv->dpll.shared_dplls[i],
					 NULL, NULL);
}

static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
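
/*
 * For orientation: intel_modeset_verify_crtc() is invoked from
 * intel_atomic_commit_tail() for every crtc that went through a
 * modeset or fastset, and intel_modeset_verify_disabled() right after
 * the disable phase of a modeset; both only compare readback state
 * against software state and warn on mismatches.
 */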

static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. I.e. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}

static void intel_modeset_clear_plls(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		intel_release_shared_dplls(state, crtc);
	}
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtcs that are going to be enabled during the modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtcs are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

u8 intel_calc_active_pipes(struct intel_atomic_state *state,
			   u8 active_pipes)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);
		else
			active_pipes &= ~BIT(crtc->pipe);
	}

	return active_pipes;
}
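
/*
 * Illustrative only: starting from active_pipes == BIT(PIPE_A), a state
 * that enables pipe B and disables pipe A yields BIT(PIPE_B); pipes not
 * touched by the state keep their bit from the passed-in mask.
 */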

static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	state->modeset = true;

	if (IS_HASWELL(dev_priv))
		return hsw_mode_set_planes_workaround(state);

	return 0;
}

/*
 * Handle calculation of various watermark data at the end of the atomic check
 * phase. The code here should be run after the per-crtc and per-plane 'check'
 * handlers to ensure that all derived state has been updated.
 */
static int calc_watermark_data(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Is there platform-specific watermark information to calculate? */
	if (dev_priv->display.compute_global_watermarks)
		return dev_priv->display.compute_global_watermarks(state);

	return 0;
}

static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
		return;

	new_crtc_state->uapi.mode_changed = false;
	new_crtc_state->update_pipe = true;
}

static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}

static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
					  struct intel_crtc *crtc,
					  u8 plane_ids_mask)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;

		if ((plane_ids_mask & BIT(plane->id)) == 0)
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
{
	/* See {hsw,vlv,ivb}_plane_ratio() */
	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
}

static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/*
		 * Not only the number of planes, but a changed plane
		 * configuration itself may already require a min CDCLK
		 * recomputation, because different planes might consume
		 * different amounts of DBuf bandwidth according to the
		 * formula:
		 *
		 *	BW per plane = pixel rate * bpp * pipe/plane scale factor
		 */
		if (old_active_planes == new_active_planes)
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}

static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	const struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;

	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	if (new_cdclk_state &&
	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
		*need_cdclk_calc = true;

	ret = dev_priv->display.bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	if (!new_cdclk_state || !new_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}

static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
		int ret = intel_crtc_atomic_check(state, crtc);

		if (ret) {
			drm_dbg_atomic(&i915->drm,
				       "[CRTC:%d:%s] atomic driver check failed\n",
				       crtc->base.base.id, crtc->base.name);
			return ret;
		}
	}

	return 0;
}

static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
					       u8 transcoders)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
		    needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @_state: state to validate
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;
	bool any_ms = false;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->inherited != old_crtc_state->inherited)
			new_crtc_state->uapi.mode_changed = true;
	}

	ret = drm_atomic_helper_check_modeset(dev, &state->base);
	if (ret)
		goto fail;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state)) {
			/* Light copy */
			intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);

			continue;
		}

		ret = intel_crtc_prepare_cleared_state(new_crtc_state);
		if (ret)
			goto fail;

		if (!new_crtc_state->hw.enable)
			continue;

		ret = intel_modeset_pipe_config(new_crtc_state);
		if (ret)
			goto fail;
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		ret = intel_modeset_pipe_config_late(new_crtc_state);
		if (ret)
			goto fail;

		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
	}

	/*
	 * Check if fastset is allowed by external dependencies like other
	 * pipes and transcoders.
	 *
	 * Right now it only forces a full modeset when the MST master
	 * transcoder did not change but the pipe of the master transcoder
	 * needs a full modeset, in which case all slaves also need to do a
	 * full modeset. Similarly, in the case of port synced crtcs, if one
	 * of the synced crtcs needs a full modeset, all the other synced
	 * crtcs are forced to do a full modeset as well.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
			enum transcoder master = new_crtc_state->mst_master_transcoder;

			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
				new_crtc_state->uapi.mode_changed = true;
				new_crtc_state->update_pipe = false;
			}
		}

		if (is_trans_port_sync_mode(new_crtc_state)) {
			u8 trans = new_crtc_state->sync_mode_slaves_mask;

			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
				trans |= BIT(new_crtc_state->master_transcoder);

			if (intel_cpu_transcoders_need_modeset(state, trans)) {
				new_crtc_state->uapi.mode_changed = true;
				new_crtc_state->update_pipe = false;
			}
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (needs_modeset(new_crtc_state)) {
			any_ms = true;
			continue;
		}

		if (!new_crtc_state->update_pipe)
			continue;

		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
	}

	if (any_ms && !check_digital_port_conflicts(state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "rejecting conflicting digital port configuration\n");
		ret = -EINVAL;
		goto fail;
	}

	ret = drm_dp_mst_atomic_check(&state->base);
	if (ret)
		goto fail;

	ret = intel_atomic_check_planes(state);
	if (ret)
		goto fail;

	/*
	 * distrust_bios_wm will force a full dbuf recomputation
	 * but the hardware state will only get updated accordingly
	 * if state->modeset==true. Hence distrust_bios_wm==true &&
	 * state->modeset==false is an invalid combination which
	 * would cause the hardware and software dbuf state to get
	 * out of sync. We must prevent that.
	 *
	 * FIXME clean up this mess and introduce better
	 * state tracking for dbuf.
	 */
	if (dev_priv->wm.distrust_bios_wm)
		any_ms = true;

	intel_fbc_choose_crtc(dev_priv, state);
	ret = calc_watermark_data(state);
	if (ret)
		goto fail;

	ret = intel_bw_atomic_check(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check_cdclk(state, &any_ms);
	if (ret)
		goto fail;

	if (any_ms) {
		ret = intel_modeset_checks(state);
		if (ret)
			goto fail;

		ret = intel_modeset_calc_cdclk(state);
		if (ret)
			return ret;

		intel_modeset_clear_plls(state);
	}

	ret = intel_atomic_check_crtcs(state);
	if (ret)
		goto fail;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state) &&
		    !new_crtc_state->update_pipe)
			continue;

		intel_dump_pipe_config(new_crtc_state, state,
				       needs_modeset(new_crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	return 0;

 fail:
	if (ret == -EDEADLK)
		return ret;

	/*
	 * FIXME would probably be nice to know which crtc specifically
	 * caused the failure, in cases where we can pinpoint it.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dump_pipe_config(new_crtc_state, state, "[failed]");

	return ret;
}
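
/*
 * For context (assumed wiring; the hookup lives elsewhere in this
 * file): intel_atomic_check() and intel_atomic_commit() are plugged
 * into the core via drm_mode_config_funcs, roughly
 *
 *	static const struct drm_mode_config_funcs intel_mode_funcs = {
 *		.atomic_check = intel_atomic_check,
 *		.atomic_commit = intel_atomic_commit,
 *		...
 *	};
 *
 * so the check above runs for every ATOMIC ioctl before any commit
 * work is queued.
 */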

static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
	if (ret < 0)
		return ret;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		bool mode_changed = needs_modeset(crtc_state);

		if (mode_changed || crtc_state->update_pipe ||
		    crtc_state->uapi.color_mgmt_changed) {
			intel_dsb_prepare(crtc_state);
		}
	}

	return 0;
}

u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];

	if (!vblank->max_vblank_count)
		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);

	return crtc->base.funcs->get_vblank_counter(&crtc->base);
}

void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

	if (crtc_state->has_pch_encoder) {
		enum pipe pch_transcoder =
			intel_crtc_pch_transcoder(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
	}
}

static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjusted pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}

static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);

		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
	}

	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}

static void intel_enable_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!needs_modeset(new_crtc_state))
		return;

	intel_crtc_update_active_timings(new_crtc_state);

	dev_priv->display.crtc_enable(state, crtc);

	/* vblanks work again, re-enable pipe CRC. */
	intel_crtc_enable_pipe_crc(crtc);
}

static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = needs_modeset(new_crtc_state);

	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else
		intel_fbc_enable(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets. But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}

static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}

static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/*
		 * In the case of Transcoder Port Sync, master and slave CRTCs
		 * can be assigned in any order, and we need to make sure that
		 * slave CRTCs are disabled first and the master CRTC last,
		 * since slave vblanks are masked until the master's vblank.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		intel_pre_plane_update(state, crtc);
		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}

static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		intel_enable_crtc(state, crtc);
		intel_update_crtc(state, crtc);
	}
}

static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtcs that have been turned off. */
		if (!needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first let's enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating,
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that need a modeset and do not depend on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
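
/*
 * A rough sketch of the overlap test gating the fastset loop above
 * (assumed semantics): two DDB ranges [a->start, a->end) and
 * [b->start, b->end) collide iff
 *
 *	a->start < b->end && b->start < a->end
 *
 * so a pipe is only flushed once its new allocation no longer
 * intersects any other pipe's not-yet-updated allocation, and the
 * while loop makes forward progress one safe pipe at a time.
 */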

static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}

static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);

	intel_atomic_helper_free_state(dev_priv);
}

static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);

		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}

static void intel_cleanup_dsbs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dsb_cleanup(old_crtc_state);
}

static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	intel_cleanup_dsbs(state);
	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
	drm_atomic_helper_commit_cleanup_done(&state->base);
	drm_atomic_state_put(&state->base);

	intel_atomic_helper_free_state(i915);
}

static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {
			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disabled pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		intel_set_cdclk_post_plane_update(state);
	}

	/*
	 * FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously_
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.active &&
		    !needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/*
		 * As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * is executed inline.
For out-of-line asynchronous modesets/flips, 15704 * deferring to a new worker seems overkill, but we would place a 15705 * schedule point (cond_resched()) here anyway to keep latencies 15706 * down. 15707 */ 15708 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); 15709 queue_work(system_highpri_wq, &state->base.commit_work); 15710 } 15711 15712 static void intel_atomic_commit_work(struct work_struct *work) 15713 { 15714 struct intel_atomic_state *state = 15715 container_of(work, struct intel_atomic_state, base.commit_work); 15716 15717 intel_atomic_commit_tail(state); 15718 } 15719 15720 static int __i915_sw_fence_call 15721 intel_atomic_commit_ready(struct i915_sw_fence *fence, 15722 enum i915_sw_fence_notify notify) 15723 { 15724 struct intel_atomic_state *state = 15725 container_of(fence, struct intel_atomic_state, commit_ready); 15726 15727 switch (notify) { 15728 case FENCE_COMPLETE: 15729 /* we do blocking waits in the worker, nothing to do here */ 15730 break; 15731 case FENCE_FREE: 15732 { 15733 struct intel_atomic_helper *helper = 15734 &to_i915(state->base.dev)->atomic_helper; 15735 15736 if (llist_add(&state->freed, &helper->free_list)) 15737 schedule_work(&helper->free_work); 15738 break; 15739 } 15740 } 15741 15742 return NOTIFY_DONE; 15743 } 15744 15745 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 15746 { 15747 struct intel_plane_state *old_plane_state, *new_plane_state; 15748 struct intel_plane *plane; 15749 int i; 15750 15751 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 15752 new_plane_state, i) 15753 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 15754 to_intel_frontbuffer(new_plane_state->hw.fb), 15755 plane->frontbuffer_bit); 15756 } 15757 15758 static int intel_atomic_commit(struct drm_device *dev, 15759 struct drm_atomic_state *_state, 15760 bool nonblock) 15761 { 15762 struct intel_atomic_state *state = to_intel_atomic_state(_state); 15763 struct drm_i915_private *dev_priv = to_i915(dev); 15764 int ret = 0; 15765 15766 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 15767 15768 drm_atomic_state_get(&state->base); 15769 i915_sw_fence_init(&state->commit_ready, 15770 intel_atomic_commit_ready); 15771 15772 /* 15773 * The intel_legacy_cursor_update() fast path takes care 15774 * of avoiding the vblank waits for simple cursor 15775 * movement and flips. For cursor on/off and size changes, 15776 * we want to perform the vblank waits so that watermark 15777 * updates happen during the correct frames. Gen9+ have 15778 * double buffered watermarks and so shouldn't need this. 15779 * 15780 * Unset state->legacy_cursor_update before the call to 15781 * drm_atomic_helper_setup_commit() because otherwise 15782 * drm_atomic_helper_wait_for_flip_done() is a noop and 15783 * we get FIFO underruns because we didn't wait 15784 * for vblank. 15785 * 15786 * FIXME doing watermarks and fb cleanup from a vblank worker 15787 * (assuming we had any) would solve these problems. 
15788 */ 15789 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) { 15790 struct intel_crtc_state *new_crtc_state; 15791 struct intel_crtc *crtc; 15792 int i; 15793 15794 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 15795 if (new_crtc_state->wm.need_postvbl_update || 15796 new_crtc_state->update_wm_post) 15797 state->base.legacy_cursor_update = false; 15798 } 15799 15800 ret = intel_atomic_prepare_commit(state); 15801 if (ret) { 15802 drm_dbg_atomic(&dev_priv->drm, 15803 "Preparing state failed with %i\n", ret); 15804 i915_sw_fence_commit(&state->commit_ready); 15805 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 15806 return ret; 15807 } 15808 15809 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 15810 if (!ret) 15811 ret = drm_atomic_helper_swap_state(&state->base, true); 15812 if (!ret) 15813 intel_atomic_swap_global_state(state); 15814 15815 if (ret) { 15816 struct intel_crtc_state *new_crtc_state; 15817 struct intel_crtc *crtc; 15818 int i; 15819 15820 i915_sw_fence_commit(&state->commit_ready); 15821 15822 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 15823 intel_dsb_cleanup(new_crtc_state); 15824 15825 drm_atomic_helper_cleanup_planes(dev, &state->base); 15826 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 15827 return ret; 15828 } 15829 dev_priv->wm.distrust_bios_wm = false; 15830 intel_shared_dpll_swap_state(state); 15831 intel_atomic_track_fbs(state); 15832 15833 drm_atomic_state_get(&state->base); 15834 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 15835 15836 i915_sw_fence_commit(&state->commit_ready); 15837 if (nonblock && state->modeset) { 15838 queue_work(dev_priv->modeset_wq, &state->base.commit_work); 15839 } else if (nonblock) { 15840 queue_work(dev_priv->flip_wq, &state->base.commit_work); 15841 } else { 15842 if (state->modeset) 15843 flush_workqueue(dev_priv->modeset_wq); 15844 intel_atomic_commit_tail(state); 15845 } 15846 15847 return 0; 15848 } 15849 15850 struct wait_rps_boost { 15851 struct wait_queue_entry wait; 15852 15853 struct drm_crtc *crtc; 15854 struct i915_request *request; 15855 }; 15856 15857 static int do_rps_boost(struct wait_queue_entry *_wait, 15858 unsigned mode, int sync, void *key) 15859 { 15860 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); 15861 struct i915_request *rq = wait->request; 15862 15863 /* 15864 * If we missed the vblank, but the request is already running it 15865 * is reasonable to assume that it will complete before the next 15866 * vblank without our intervention, so leave RPS alone. 
15867 */ 15868 if (!i915_request_started(rq)) 15869 intel_rps_boost(rq); 15870 i915_request_put(rq); 15871 15872 drm_crtc_vblank_put(wait->crtc); 15873 15874 list_del(&wait->wait.entry); 15875 kfree(wait); 15876 return 1; 15877 } 15878 15879 static void add_rps_boost_after_vblank(struct drm_crtc *crtc, 15880 struct dma_fence *fence) 15881 { 15882 struct wait_rps_boost *wait; 15883 15884 if (!dma_fence_is_i915(fence)) 15885 return; 15886 15887 if (INTEL_GEN(to_i915(crtc->dev)) < 6) 15888 return; 15889 15890 if (drm_crtc_vblank_get(crtc)) 15891 return; 15892 15893 wait = kmalloc(sizeof(*wait), GFP_KERNEL); 15894 if (!wait) { 15895 drm_crtc_vblank_put(crtc); 15896 return; 15897 } 15898 15899 wait->request = to_request(dma_fence_get(fence)); 15900 wait->crtc = crtc; 15901 15902 wait->wait.func = do_rps_boost; 15903 wait->wait.flags = 0; 15904 15905 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); 15906 } 15907 15908 static int intel_plane_pin_fb(struct intel_plane_state *plane_state) 15909 { 15910 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 15911 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 15912 struct drm_framebuffer *fb = plane_state->hw.fb; 15913 struct i915_vma *vma; 15914 15915 if (plane->id == PLANE_CURSOR && 15916 INTEL_INFO(dev_priv)->display.cursor_needs_physical) { 15917 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 15918 const int align = intel_cursor_alignment(dev_priv); 15919 int err; 15920 15921 err = i915_gem_object_attach_phys(obj, align); 15922 if (err) 15923 return err; 15924 } 15925 15926 vma = intel_pin_and_fence_fb_obj(fb, 15927 &plane_state->view, 15928 intel_plane_uses_fence(plane_state), 15929 &plane_state->flags); 15930 if (IS_ERR(vma)) 15931 return PTR_ERR(vma); 15932 15933 plane_state->vma = vma; 15934 15935 return 0; 15936 } 15937 15938 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) 15939 { 15940 struct i915_vma *vma; 15941 15942 vma = fetch_and_zero(&old_plane_state->vma); 15943 if (vma) 15944 intel_unpin_fb_vma(vma, old_plane_state->flags); 15945 } 15946 15947 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) 15948 { 15949 struct i915_sched_attr attr = { 15950 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY), 15951 }; 15952 15953 i915_gem_object_wait_priority(obj, 0, &attr); 15954 } 15955 15956 /** 15957 * intel_prepare_plane_fb - Prepare fb for usage on plane 15958 * @_plane: drm plane to prepare for 15959 * @_new_plane_state: the plane state being prepared 15960 * 15961 * Prepares a framebuffer for usage on a display plane. Generally this 15962 * involves pinning the underlying object and updating the frontbuffer tracking 15963 * bits. Some older platforms need special physical address handling for 15964 * cursor planes. 15965 * 15966 * Returns 0 on success, negative error code on failure. 
15967 */ 15968 int 15969 intel_prepare_plane_fb(struct drm_plane *_plane, 15970 struct drm_plane_state *_new_plane_state) 15971 { 15972 struct intel_plane *plane = to_intel_plane(_plane); 15973 struct intel_plane_state *new_plane_state = 15974 to_intel_plane_state(_new_plane_state); 15975 struct intel_atomic_state *state = 15976 to_intel_atomic_state(new_plane_state->uapi.state); 15977 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 15978 const struct intel_plane_state *old_plane_state = 15979 intel_atomic_get_old_plane_state(state, plane); 15980 struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb); 15981 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb); 15982 int ret; 15983 15984 if (old_obj) { 15985 const struct intel_crtc_state *crtc_state = 15986 intel_atomic_get_new_crtc_state(state, 15987 to_intel_crtc(old_plane_state->hw.crtc)); 15988 15989 /* Big Hammer, we also need to ensure that any pending 15990 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 15991 * current scanout is retired before unpinning the old 15992 * framebuffer. Note that we rely on userspace rendering 15993 * into the buffer attached to the pipe they are waiting 15994 * on. If not, userspace generates a GPU hang with IPEHR 15995 * point to the MI_WAIT_FOR_EVENT. 15996 * 15997 * This should only fail upon a hung GPU, in which case we 15998 * can safely continue. 15999 */ 16000 if (needs_modeset(crtc_state)) { 16001 ret = i915_sw_fence_await_reservation(&state->commit_ready, 16002 old_obj->base.resv, NULL, 16003 false, 0, 16004 GFP_KERNEL); 16005 if (ret < 0) 16006 return ret; 16007 } 16008 } 16009 16010 if (new_plane_state->uapi.fence) { /* explicit fencing */ 16011 ret = i915_sw_fence_await_dma_fence(&state->commit_ready, 16012 new_plane_state->uapi.fence, 16013 i915_fence_timeout(dev_priv), 16014 GFP_KERNEL); 16015 if (ret < 0) 16016 return ret; 16017 } 16018 16019 if (!obj) 16020 return 0; 16021 16022 ret = i915_gem_object_pin_pages(obj); 16023 if (ret) 16024 return ret; 16025 16026 ret = intel_plane_pin_fb(new_plane_state); 16027 16028 i915_gem_object_unpin_pages(obj); 16029 if (ret) 16030 return ret; 16031 16032 fb_obj_bump_render_priority(obj); 16033 i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB); 16034 16035 if (!new_plane_state->uapi.fence) { /* implicit fencing */ 16036 struct dma_fence *fence; 16037 16038 ret = i915_sw_fence_await_reservation(&state->commit_ready, 16039 obj->base.resv, NULL, 16040 false, 16041 i915_fence_timeout(dev_priv), 16042 GFP_KERNEL); 16043 if (ret < 0) 16044 goto unpin_fb; 16045 16046 fence = dma_resv_get_excl_rcu(obj->base.resv); 16047 if (fence) { 16048 add_rps_boost_after_vblank(new_plane_state->hw.crtc, 16049 fence); 16050 dma_fence_put(fence); 16051 } 16052 } else { 16053 add_rps_boost_after_vblank(new_plane_state->hw.crtc, 16054 new_plane_state->uapi.fence); 16055 } 16056 16057 /* 16058 * We declare pageflips to be interactive and so merit a small bias 16059 * towards upclocking to deliver the frame on time. By only changing 16060 * the RPS thresholds to sample more regularly and aim for higher 16061 * clocks we can hopefully deliver low power workloads (like kodi) 16062 * that are not quite steady state without resorting to forcing 16063 * maximum clocks following a vblank miss (see do_rps_boost()). 
/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @_old_plane_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *_old_plane_state)
{
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(_old_plane_state);
	struct intel_atomic_state *state =
		to_intel_atomic_state(old_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);

	if (!obj)
		return;

	if (state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
		state->rps_interactive = false;
	}

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	intel_plane_unpin_fb(old_plane_state);
}

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}

static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
					    u32 format, u64 modifier)
{
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		return false;
	}

	switch (format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_XRGB8888:
		return modifier == DRM_FORMAT_MOD_LINEAR ||
			modifier == I915_FORMAT_MOD_X_TILED;
	default:
		return false;
	}
}

static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
					    u32 format, u64 modifier)
{
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		return false;
	}

	switch (format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_XBGR16161616F:
		return modifier == DRM_FORMAT_MOD_LINEAR ||
			modifier == I915_FORMAT_MOD_X_TILED;
	default:
		return false;
	}
}

static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
					      u32 format, u64 modifier)
{
	return modifier == DRM_FORMAT_MOD_LINEAR &&
		format == DRM_FORMAT_ARGB8888;
}
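
/*
 * Worked example for the helpers above: XRGB8888 with the X-tiled
 * modifier passes both switches in i965_plane_format_mod_supported()
 * and is accepted, while e.g. XBGR16161616F with a Y-tiled modifier
 * fails the first switch (only LINEAR and X_TILED are listed) and is
 * rejected outright.
 */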
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};

static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};

static int
intel_legacy_cursor_update(struct drm_plane *_plane,
			   struct drm_crtc *_crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_plane_state *new_plane_state;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When the crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath.
	 */
	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane. This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->uapi.commit &&
	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->uapi.crtc != &crtc->base ||
	    old_plane_state->uapi.src_w != src_w ||
	    old_plane_state->uapi.src_h != src_h ||
	    old_plane_state->uapi.crtc_w != crtc_w ||
	    old_plane_state->uapi.crtc_h != crtc_h ||
	    !old_plane_state->uapi.fb != !fb)
		goto slow;

	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

	new_plane_state->uapi.src_x = src_x;
	new_plane_state->uapi.src_y = src_y;
	new_plane_state->uapi.src_w = src_w;
	new_plane_state->uapi.src_h = src_h;
	new_plane_state->uapi.crtc_x = crtc_x;
	new_plane_state->uapi.crtc_y = crtc_y;
	new_plane_state->uapi.crtc_w = crtc_w;
	new_plane_state->uapi.crtc_h = crtc_h;

	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  old_plane_state, new_plane_state);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
				ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
				to_intel_frontbuffer(new_plane_state->hw.fb),
				plane->frontbuffer_bit);

	/* Swap plane state */
	plane->base.state = &new_plane_state->uapi;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
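
/*
 * Illustrative userspace view of the fastpath above (not kernel code):
 * a legacy cursor ioctl that only moves the cursor, e.g.
 *
 *	struct drm_mode_cursor2 arg = {
 *		.flags = DRM_MODE_CURSOR_MOVE,
 *		.crtc_id = crtc_id,
 *		.x = 100, .y = 200,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CURSOR2, &arg);
 *
 * keeps fb and size unchanged and so stays on the fastpath, while
 * resizing the cursor or attaching it to a different crtc falls back
 * to drm_atomic_helper_update_plane().
 */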
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};

static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
			       enum i9xx_plane_id i9xx_plane)
{
	if (!HAS_FBC(dev_priv))
		return false;

	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		return i9xx_plane == PLANE_A; /* tied to pipe A */
	else if (IS_IVYBRIDGE(dev_priv))
		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
			i9xx_plane == PLANE_C;
	else if (INTEL_GEN(dev_priv) >= 4)
		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
	else
		return i9xx_plane == PLANE_A;
}

static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
	    INTEL_NUM_PIPES(dev_priv) == 2)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		formats = vlv_primary_formats;
		num_formats = ARRAY_SIZE(vlv_primary_formats);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 * output on each color channel has one quarter amplitude.
		 * It can be brought up to full amplitude by using pipe
		 * gamma correction or pipe color space conversion to
		 * multiply the plane output by four."
		 *
		 * There is no dedicated plane gamma for the primary plane,
		 * and using the pipe gamma/csc could conflict with other
		 * planes, so we choose not to expose fp16 on IVB primary
		 * planes. HSW primary planes no longer have this problem.
		 */
		if (IS_IVYBRIDGE(dev_priv)) {
			formats = ivb_primary_formats;
			num_formats = ARRAY_SIZE(ivb_primary_formats);
		} else {
			formats = i965_primary_formats;
			num_formats = ARRAY_SIZE(i965_primary_formats);
		}
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	if (INTEL_GEN(dev_priv) >= 4)
		plane_funcs = &i965_plane_funcs;
	else
		plane_funcs = &i8xx_plane_funcs;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		plane->min_cdclk = vlv_plane_min_cdclk;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		plane->min_cdclk = hsw_plane_min_cdclk;
	else if (IS_IVYBRIDGE(dev_priv))
		plane->min_cdclk = ivb_plane_min_cdclk;
	else
		plane->min_cdclk = i9xx_plane_min_cdclk;

	plane->max_stride = i9xx_plane_max_stride;
	plane->update_plane = i9xx_update_plane;
	plane->disable_plane = i9xx_disable_plane;
	plane->get_hw_state = i9xx_plane_get_hw_state;
	plane->check_plane = i9xx_plane_check;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       0, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       0, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}

static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	if (INTEL_GEN(dev_priv) >= 12)
		drm_plane_enable_fb_damage_clips(&cursor->base);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}

#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources

static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static struct intel_crtc *intel_crtc_alloc(void)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;

	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
	if (!crtc)
		return ERR_PTR(-ENOMEM);

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(crtc);
		return ERR_PTR(-ENOMEM);
	}

	crtc->base.state = &crtc_state->uapi;
	crtc->config = crtc_state;

	return crtc;
}

static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}

static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
								  plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}
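
/*
 * Worked example: drm_crtc_mask() is 1 << drm_crtc_index(crtc), and
 * intel_crtc_init() below warns if the drm index ever diverges from
 * the pipe, so on a two-pipe device a plane on pipe B simply ends up
 * with possible_crtcs == BIT(1).
 */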
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	intel_color_init(crtc);

	intel_crtc_crc_init(crtc);

	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}

int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	u32 possible_clones = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			possible_clones |= drm_encoder_mask(&source_encoder->base);
	}

	return possible_clones;
}

static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc;
	u32 possible_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		if (encoder->pipe_mask & BIT(crtc->pipe))
			possible_crtcs |= drm_crtc_mask(&crtc->base);
	}

	return possible_crtcs;
}
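
/*
 * Worked example for the two helpers above (illustrative indexes): if
 * an SDVO encoder with drm index 2 and a CRT encoder with drm index 3
 * are mutually cloneable, both end up with possible_clones containing
 * BIT(2) | BIT(3); encoders_cloneable() treats an encoder as cloneable
 * with itself, so each encoder's own bit is always included.
 */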
static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
{
	int pps_num;
	int pps_idx;

	if (HAS_DDI(dev_priv))
		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere registers can be write protected.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_num = 2;
	else
		pps_num = 1;

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));

		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
	}
}

static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}

static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);	/* DDI TC1 */
		intel_ddi_init(dev_priv, PORT_E);	/* DDI TC2 */
	} else if (INTEL_GEN(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		intel_ddi_init(dev_priv, PORT_G);
		intel_ddi_init(dev_priv, PORT_H);
		intel_ddi_init(dev_priv, PORT_I);
		icl_dsi_init(dev_priv);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/*
		 * DDI B, C, D, and F detection is indicated by the
		 * SFUSE_STRAP register.
		 */
		found = intel_de_read(dev_priv, SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claims are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}

static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	intel_frontbuffer_put(intel_fb->frontbuffer);

	kfree(intel_fb);
}

static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	if (obj->userptr.mm) {
		drm_dbg(&i915->drm,
			"attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}

static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	return 0;
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};

static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	i915_gem_object_lock(obj, NULL);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %s / modifier 0x%llx\n",
			    drm_get_format_name(mode_cmd->pixel_format,
						&format_name),
			    mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		if (is_gen12_ccs_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}

static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(obj, &mode_cmd);
	i915_gem_object_put(obj);

	return fb;
}
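
/*
 * Illustrative userspace counterpart (not kernel code): the checks in
 * intel_framebuffer_init() are what a DRM_IOCTL_MODE_ADDFB2 request is
 * validated against. A request that passes them could look like:
 *
 *	struct drm_mode_fb_cmd2 cmd = {
 *		.width = 1920, .height = 1080,
 *		.pixel_format = DRM_FORMAT_XRGB8888,
 *		.flags = DRM_MODE_FB_MODIFIERS,
 *		.handles = { bo_handle },	// GEM handle, tiling must match
 *		.pitches = { 1920 * 4 },	// stride-aligned, <= max stride
 *		.offsets = { 0 },		// plane 0 offset must be 0
 *		.modifier = { I915_FORMAT_MOD_X_TILED },
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd);
 */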
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (INTEL_GEN(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	if (INTEL_GEN(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	return MODE_OK;
}
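
/*
 * Worked example: a 7680x4320 ("8K") mode passes the gen11+ transcoder
 * limits above (7680 <= 16384, 4320 <= 8192), but is rejected with
 * MODE_V_ILLEGAL on a gen9 part, where vdisplay_max is 4096.
 */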
enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
	 * too big for that.
	 */
	if (INTEL_GEN(dev_priv) >= 11) {
		plane_width_max = 5120;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skl_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ilk_crtc_compute_clock;
		dev_priv->display.crtc_enable = ilk_crtc_enable;
		dev_priv->display.crtc_disable = ilk_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN(dev_priv, 2)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	if (IS_GEN(dev_priv, 5)) {
		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
	else
		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
}

void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(i915->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(i915->dbuf.obj.state);

	intel_update_cdclk(i915);
	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;

	dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
}

static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_crtc(state->dev, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (crtc_state->hw.active) {
			/*
			 * Preserve the inherited flag to avoid
			 * taking the full modeset path.
			 */
			crtc_state->inherited = true;
		}
	}

	drm_for_each_plane(plane, state->dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements. This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform. Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
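
/*
 * The retry/fail dance above is the standard drm modeset-lock pattern;
 * in isolation it reads roughly like this (sketch, not driver code):
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *	retry:
 *	ret = operation_taking_modeset_locks(&ctx);	// hypothetical
 *	if (ret == -EDEADLK) {
 *		drm_modeset_backoff(&ctx);	// drop locks, wait our turn
 *		goto retry;
 *	}
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 *
 * i.e. -EDEADLK is not a real error but a request to back off and
 * restart, which is why the fail: label above checks for it first.
 */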
static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 5)) {
		u32 fdi_pll_clk =
			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
		dev_priv->fdi_pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
}

static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			/*
			 * We've not yet detected sink capabilities
			 * (audio, infoframes, etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * FIXME hack to force full modeset when DSC is being
			 * used.
			 *
			 * As long as we do not have full state readout and
			 * config comparison of crtc_state->dsc, we have no way
			 * to ensure reliable fastset. Remove once we have
			 * readout for DSC.
			 */
			if (crtc_state->dsc.compression_enable) {
				ret = drm_atomic_add_affected_connectors(state,
									 &crtc->base);
				if (ret)
					goto out;
				crtc_state->uapi.mode_changed = true;
				drm_dbg_kms(dev, "Force full modeset for DSC\n");
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
		mode_config->cursor_height = 1023;
	} else if (IS_I830(i915) || IS_I85X(i915) ||
		   IS_I915G(i915) || IS_I915GM(i915)) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}

static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}

static void plane_config_fini(struct intel_initial_plane_config *plane_config)
{
	if (plane_config->fb) {
		struct drm_framebuffer *fb = &plane_config->fb->base;

		/* We may only have the stub and not a full framebuffer */
		if (drm_framebuffer_read_refcount(fb))
			drm_framebuffer_put(fb);
		else
			kfree(fb);
	}

	if (plane_config->vma)
		i915_vma_put(plane_config->vma);
}

/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	intel_csr_ucode_init(i915);

	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	intel_mode_config_init(i915);

	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_csr:
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
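
/*
 * Probe-time ordering of the three intel_modeset_init* phases (sketch
 * only; the real sequencing lives in the driver probe code, this just
 * restates the part #1/#2/#3 comments around these functions):
 *
 *	intel_modeset_init_noirq(i915);		// part #1: before irq install
 *	...install interrupts...
 *	intel_modeset_init_nogem(i915);		// part #2: before gem init
 *	...initialize gem...
 *	intel_modeset_init(i915);		// part #3
 */
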
"s" : ""); 17941 17942 if (HAS_DISPLAY(i915)) { 17943 for_each_pipe(i915, pipe) { 17944 ret = intel_crtc_init(i915, pipe); 17945 if (ret) { 17946 intel_mode_config_cleanup(i915); 17947 return ret; 17948 } 17949 } 17950 } 17951 17952 intel_plane_possible_crtcs_init(i915); 17953 intel_shared_dpll_init(dev); 17954 intel_update_fdi_pll_freq(i915); 17955 17956 intel_update_czclk(i915); 17957 intel_modeset_init_hw(i915); 17958 17959 intel_hdcp_component_init(i915); 17960 17961 if (i915->max_cdclk_freq == 0) 17962 intel_update_max_cdclk(i915); 17963 17964 /* 17965 * If the platform has HTI, we need to find out whether it has reserved 17966 * any display resources before we create our display outputs. 17967 */ 17968 if (INTEL_INFO(i915)->display.has_hti) 17969 i915->hti_state = intel_de_read(i915, HDPORT_STATE); 17970 17971 /* Just disable it once at startup */ 17972 intel_vga_disable(i915); 17973 intel_setup_outputs(i915); 17974 17975 drm_modeset_lock_all(dev); 17976 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 17977 drm_modeset_unlock_all(dev); 17978 17979 for_each_intel_crtc(dev, crtc) { 17980 struct intel_initial_plane_config plane_config = {}; 17981 17982 if (!crtc->active) 17983 continue; 17984 17985 /* 17986 * Note that reserving the BIOS fb up front prevents us 17987 * from stuffing other stolen allocations like the ring 17988 * on top. This prevents some ugliness at boot time, and 17989 * can even allow for smooth boot transitions if the BIOS 17990 * fb is large enough for the active pipe configuration. 17991 */ 17992 i915->display.get_initial_plane_config(crtc, &plane_config); 17993 17994 /* 17995 * If the fb is shared between multiple heads, we'll 17996 * just get the first one. 17997 */ 17998 intel_find_initial_plane_obj(crtc, &plane_config); 17999 18000 plane_config_fini(&plane_config); 18001 } 18002 18003 /* 18004 * Make sure hardware watermarks really match the state we read out. 18005 * Note that we need to do this after reconstructing the BIOS fb's 18006 * since the watermark calculation done here will use pstate->fb. 18007 */ 18008 if (!HAS_GMCH(i915)) 18009 sanitize_watermarks(i915); 18010 18011 /* 18012 * Force all active planes to recompute their states. So that on 18013 * mode_setcrtc after probe, all the intel_plane_state variables 18014 * are already calculated and there is no assert_plane warnings 18015 * during bootup. 18016 */ 18017 ret = intel_initial_commit(dev); 18018 if (ret) 18019 drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n"); 18020 18021 return 0; 18022 } 18023 18024 /* part #3: call after gem init */ 18025 int intel_modeset_init(struct drm_i915_private *i915) 18026 { 18027 int ret; 18028 18029 intel_overlay_setup(i915); 18030 18031 if (!HAS_DISPLAY(i915)) 18032 return 0; 18033 18034 ret = intel_fbdev_init(&i915->drm); 18035 if (ret) 18036 return ret; 18037 18038 /* Only enable hotplug handling once the fbdev is fully set up. 
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/*
	 * The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}

void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}

static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		if (!plane->get_hw_state(plane, &pipe))
			continue;

		if (pipe == crtc->pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			    plane->base.base.id, plane->base.name);

		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}

static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}

static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}

static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
			       enum pipe pch_transcoder)
{
	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
}
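
/*
 * Illustrative reading of has_pch_transcoder() above: IBX/CPT expose a
 * PCH transcoder per pipe, while LPT-H only has PCH transcoder A, so
 * e.g. has_pch_transcoder(dev_priv, PIPE_B) is true on CPT but false on
 * LPT-H. See also the FIFO underrun comment in intel_sanitize_crtc().
 */
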
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}
}

static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/*
	 * Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders.
	 */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_transcoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}

static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/*
	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
	 * the hardware when a high res display is plugged in. The DPLL P
	 * divider is zero, and the pipe timings are bonkers. We'll
	 * try to disable everything in that case.
	 *
	 * FIXME would be nice to be able to sanitize this state
	 * without several WARNs, but for now let's take the easy
	 * road.
	 */
	return IS_GEN(dev_priv, 6) &&
		crtc_state->hw.active &&
		crtc_state->shared_dpll &&
		crtc_state->port_clock == 0;
}

static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/*
	 * We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active.
	 */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/*
		 * Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again.
		 */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/*
		 * Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default.
		 */
		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}

/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			    plane->base.base.id, plane->base.name,
			    enableddisabled(visible), pipe_name(pipe));
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_active_planes(crtc_state);
	}
}
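
/*
 * Note on the FIXME above: only plane visibility and pipe assignment are
 * read back here, not the fb. This is why intel_modeset_readout_hw_state()
 * below has to guesstimate data_rate[] and min_cdclk[] for visible planes
 * instead of using intel_plane_data_rate() and plane->min_cdclk().
 */
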
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		crtc_state->hw.active = crtc_state->hw.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	dev_priv->active_pipes = cdclk_state->active_pipes =
		dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	intel_dpll_readout_hw_state(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			struct drm_display_mode *mode = &crtc_state->hw.mode;

			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
						    crtc_state);

			*mode = crtc_state->hw.adjusted_mode;
			mode->hdisplay = crtc_state->pipe_src_w;
			mode->vdisplay = crtc_state->pipe_src_h;

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_compute_pixel_rate(crtc_state);

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
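
/*
 * Worked example of the readout guesstimates above (illustrative numbers
 * only): with a 148500 kHz pixel rate and no fb information yet, a
 * visible plane is assumed to consume data_rate = 4 * 148500 (worst case
 * 4 bytes per pixel), and min_cdclk becomes 148500 kHz, or
 * DIV_ROUND_UP(148500, 2) = 74250 kHz on glk/gen10+ and double wide
 * pipes, which can output two pixels per clock.
 */
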
static void
get_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_crtc_state *crtc_state;

		if (!encoder->get_power_domains)
			continue;

		/*
		 * MST-primary and inactive encoders don't have a crtc state
		 * and neither of these require any power domain references.
		 */
		if (!encoder->base.crtc)
			continue;

		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
		encoder->get_power_domains(encoder, crtc_state);
	}
}

static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}

static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
				       enum port port, i915_reg_t hdmi_reg)
{
	u32 val = intel_de_read(dev_priv, hdmi_reg);

	if (val & SDVO_ENABLE ||
	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "Sanitizing transcoder select for HDMI %c\n",
		    port_name(port));

	val &= ~SDVO_PIPE_SEL_MASK;
	val |= SDVO_PIPE_SEL(PIPE_A);

	intel_de_write(dev_priv, hdmi_reg, val);
}

static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
				     enum port port, i915_reg_t dp_reg)
{
	u32 val = intel_de_read(dev_priv, dp_reg);

	if (val & DP_PORT_EN ||
	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "Sanitizing transcoder select for DP %c\n",
		    port_name(port));

	val &= ~DP_PIPE_SEL_MASK;
	val |= DP_PIPE_SEL(PIPE_A);

	intel_de_write(dev_priv, dp_reg, val);
}

static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB is multiplexed with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
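
/*
 * Example of the above (illustrative): a BIOS that left PCH_DP_B with
 * DP_PORT_EN clear but DP_PIPE_SEL(PIPE_B) latched would later trip
 * assert_pch_dp_disabled(); ibx_sanitize_pch_dp_port() rewrites such a
 * dangling pipe select back to pipe A without touching an enabled port.
 */
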
/*
 * Scan out the current hw modeset state and sanitize it to the current
 * state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}

void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}

static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}

/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup().
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}

/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_csr_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

struct intel_display_error_state {

	u32 power_well_driver;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};
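
/*
 * Typical capture/print pairing for the structure above (sketch only;
 * the actual call sites live in the error capture code, and the "m"
 * error-state buffer is assumed to come from there):
 *
 *	struct intel_display_error_state *error;
 *
 *	error = intel_display_capture_error_state(dev_priv);
 *	...
 *	intel_display_print_error_state(m, error);
 *	kfree(error);
 */
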
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv))
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = intel_de_read(dev_priv,
							 HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
		error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
		error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));

		error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
		error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = intel_de_read(dev_priv,
							     DSPSIZE(i));
			error->plane[i].pos = intel_de_read(dev_priv,
							    DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = intel_de_read(dev_priv,
							     DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = intel_de_read(dev_priv,
								DSPSURF(i));
			error->plane[i].tile_offset = intel_de_read(dev_priv,
								    DSPTILEOFF(i));
		}

		error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = intel_de_read(dev_priv,
							    PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = intel_de_read(dev_priv,
							  PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = intel_de_read(dev_priv,
							    HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = intel_de_read(dev_priv,
							    HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = intel_de_read(dev_priv,
							   HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = intel_de_read(dev_priv,
							    VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = intel_de_read(dev_priv,
							    VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = intel_de_read(dev_priv,
							   VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}

#endif