// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/kernel.h>
#include <linux/string_helpers.h>

#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_snps_phy.h"
#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"

struct intel_dpll_funcs {
	int (*crtc_compute_clock)(struct intel_atomic_state *state,
				  struct intel_crtc *crtc);
	int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
				    struct intel_crtc *crtc);
};

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are based on the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are based on the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000, .max = 540000 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	.dot = { .min = 25000, .max = 594000 },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;

	clock->vco = clock->n == 0 ? 0 :
		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = clock->p == 0 ? 0 :
		DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;

	clock->vco = clock->n + 2 == 0 ? 0 :
		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = clock->p == 0 ? 0 :
		DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2 * 5;

	clock->vco = clock->n == 0 ? 0 :
		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = clock->p == 0 ? 0 :
		DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2 * 5;

	clock->vco = clock->n == 0 ? 0 :
		DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), clock->n << 22);
	clock->dot = clock->p == 0 ? 0 :
		DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
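
/*
 * Worked example for the i9xx math above (illustrative values only, not
 * taken from any particular platform's tables): with refclk = 96000 kHz and
 * register values n = 2, m1 = 12, m2 = 5, p1 = 2, p2 = 10,
 * i9xx_calc_dpll_params() computes m = 5 * (12 + 2) + (5 + 2) = 77,
 * vco = 96000 * 77 / (2 + 2) = 1848000 kHz, p = 2 * 10 = 20 and
 * dot = 1848000 / 20 = 92400 kHz, which happens to satisfy the
 * intel_limits_i9xx_sdvo ranges above.
 */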

static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;

	if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return i915->display.vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(i915))
		return 120000;
	else if (DISPLAY_VER(i915) != 2)
		return 96000;
	else
		return 48000;
}

void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
			    struct intel_dpll_hw_state *dpll_hw_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;

	if (DISPLAY_VER(dev_priv) >= 4) {
		u32 tmp;

		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv,
					    DPLL_MD(dev_priv, crtc->pipe));

		hw_state->dpll_md = tmp;
	}

	hw_state->dpll = intel_de_read(dev_priv, DPLL(dev_priv, crtc->pipe));

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe));
		hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		hw_state->dpll &= ~(DPLL_LOCK_VLV |
				    DPLL_PORTC_READY_MASK |
				    DPLL_PORTB_READY_MASK);
	}
}

/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	u32 dpll = hw_state->dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(crtc_state);

	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = hw_state->fp0;
	else
		fp = hw_state->fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (DISPLAY_VER(dev_priv) != 2) {
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		enum pipe lvds_pipe;

		if (IS_I85X(dev_priv) &&
		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(dev_priv, LVDS);

			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	crtc_state->port_clock = port_clock;
}

void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	int refclk = 100000;
	struct dpll clock;
	u32 tmp;

	/* In case of DSI, DPLL will not be used */
	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(ch));
	vlv_dpio_put(dev_priv);

	clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
	clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
	clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp);
	clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp);
	clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp);

	crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock);
}

void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(ch));
	pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(ch));
	pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(ch));
	pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(ch));
	pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
	vlv_dpio_put(dev_priv);

	clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2);
	clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1);
	clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13);
	clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13);

	crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock);
}
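
/*
 * Note on the CHV readout above (descriptive comment, not from Bspec): m2 is
 * carried here as a fixed point value with 22 fractional bits - the integer
 * part of the divider sits above bit 22 and the optional fractional part
 * (when DPIO_CHV_FRAC_DIV_EN is set) occupies the low 22 bits. This matches
 * the 24 << 22 .. 175 << 22 m2 range in intel_limits_chv and is why
 * chv_calc_dpll_params() divides by "clock->n << 22" when computing the VCO.
 */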

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		return false;
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		return false;
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		return false;
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		return false;

	if (!IS_PINEVIEW(dev_priv) &&
	    !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv))
		if (clock->m1 <= clock->m2)
			return false;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			return false;
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			return false;
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		return false;
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		return false;

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}
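
/*
 * Example of the p2 selection above (illustrative, using the
 * intel_limits_i9xx_sdvo table): a 148500 kHz non-LVDS target is below the
 * 200000 kHz dot_limit and so picks p2_slow (10), while a 270000 kHz target
 * is above it and picks p2_fast (5). LVDS ignores the dot_limit and picks p2
 * purely from the current single vs. dual channel configuration.
 */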

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk,
		    const struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (drm_WARN_ON_ONCE(dev, !target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
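
/*
 * Illustrative numbers for the ppm math above (not taken from any real
 * panel): a target of 148500 kHz with a calculated dot clock of 148450 kHz
 * gives 1000000 * 50 / 148500 ~= 336 ppm of error. A new candidate only
 * replaces the current best if it improves on the best error by more than
 * 10 ppm, unless it wins on the P value rule handled earlier in the function.
 */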

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2 * 5;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 always set
	 * to 2. If we ever need to support a 200 MHz refclk, we need to
	 * revisit this because n may no longer be 1.
	 */
	clock.n = 1;
	clock.m1 = 2;

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2 * 5;

			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	const struct intel_limit *limit = &intel_limits_bxt;
	int refclk = 100000;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}

u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}

static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}

static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
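
/*
 * Example of the FP encodings above (illustrative values): with n = 2,
 * m1 = 12, m2 = 5, i9xx_dpll_compute_fp() yields 0x00020c05. Pineview stores
 * its N divider as a one-hot "ring counter" value instead, which is why
 * pnv_dpll_compute_fp() writes (1 << n) << 16 and i9xx_crtc_clock_get()
 * decodes it back with ffs().
 */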

static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
		     const struct dpll *clock,
		     const struct dpll *reduced_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;

	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_G4X(dev_priv)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	} else if (IS_PINEVIEW(dev_priv)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		WARN_ON(reduced_clock->p1 != clock->p1);
	} else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		WARN_ON(reduced_clock->p1 != clock->p1);
	}

	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	WARN_ON(reduced_clock->p2 != clock->p2);

	if (DISPLAY_VER(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(display))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll;
}

static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
			      const struct dpll *clock,
			      const struct dpll *reduced_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;

	if (IS_PINEVIEW(dev_priv)) {
		hw_state->fp0 = pnv_dpll_compute_fp(clock);
		hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		hw_state->fp0 = i9xx_dpll_compute_fp(clock);
		hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
	}

	hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);

	if (DISPLAY_VER(dev_priv) >= 4)
		hw_state->dpll_md = i965_dpll_md(crtc_state);
}

static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
		     const struct dpll *clock,
		     const struct dpll *reduced_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;

	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}
	WARN_ON(reduced_clock->p1 != clock->p1);
	WARN_ON(reduced_clock->p2 != clock->p2);

	/*
	 * Bspec:
	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to “1” in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity we simply keep both bits always enabled in
	 * both DPLLs. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(display))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll;
}

static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
			      const struct dpll *clock,
			      const struct dpll *reduced_clock)
{
	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;

	hw_state->fp0 = i9xx_dpll_compute_fp(clock);
	hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);

	hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock);
}

static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder =
		intel_get_crtc_new_encoder(state, crtc_state);
	int ret;

	if (DISPLAY_VER(dev_priv) < 11 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		return 0;

	ret = intel_compute_shared_dplls(state, crtc, encoder);
	if (ret)
		return ret;

	/* FIXME this is a mess */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		return 0;

	/* CRT dotclock is determined via other means */
	if (!crtc_state->has_pch_encoder)
		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder =
		intel_get_crtc_new_encoder(state, crtc_state);

	if (DISPLAY_VER(dev_priv) < 11 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		return 0;

	return intel_reserve_shared_dplls(state, crtc, encoder);
}

static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder =
		intel_get_crtc_new_encoder(state, crtc_state);
	int ret;

	ret = intel_mpllb_calc_state(crtc_state, encoder);
	if (ret)
		return ret;

	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder =
		intel_get_crtc_new_encoder(state, crtc_state);
	int ret;

	ret = intel_cx0pll_calc_state(crtc_state, encoder);
	if (ret)
		return ret;

	/* TODO: Do the readback via intel_compute_shared_dplls() */
	crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);

	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    ((intel_panel_use_ssc(display) && i915->display.vbt.lvds_ssc_freq == 100000) ||
	     (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915))))
		return 25;

	if (crtc_state->sdvo_tv_clock)
		return 20;

	return 21;
}

static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
{
	return dpll->m < factor * dpll->n;
}

static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor)
{
	u32 fp;

	fp = i9xx_dpll_compute_fp(clock);
	if (ilk_needs_fb_cb_tune(clock, factor))
		fp |= FP_CB_TUNE;

	return fp;
}

static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
		    const struct dpll *clock,
		    const struct dpll *reduced_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;

	dpll = DPLL_VCO_ENABLE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	WARN_ON(reduced_clock->p2 != clock->p2);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(display))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll;
}

static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
			     const struct dpll *clock,
			     const struct dpll *reduced_clock)
{
	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	int factor = ilk_fb_cb_factor(crtc_state);

	hw_state->fp0 = ilk_dpll_compute_fp(clock, factor);
	hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor);

	hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock);
}

static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit;
	int refclk = 120000;
	int ret;

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(display)) {
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    dev_priv->display.vbt.lvds_ssc_freq);
			refclk = dev_priv->display.vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &ilk_limits_dual_lvds_100m;
			else
				limit = &ilk_limits_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &ilk_limits_single_lvds_100m;
			else
				limit = &ilk_limits_single_lvds;
		}
	} else {
		limit = &ilk_limits_dac;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);

	ilk_compute_dpll(crtc_state, &crtc_state->dpll,
			 &crtc_state->dpll);

	ret = intel_compute_shared_dplls(state, crtc, NULL);
	if (ret)
		return ret;

	crtc_state->port_clock = crtc_state->dpll.dot;
	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return ret;
}
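
/*
 * Note on the reduced clock handling (descriptive comment, based on the
 * callers in this file): every *_crtc_compute_clock() here passes
 * &crtc_state->dpll as both the normal and the reduced clock, so FP0 and FP1
 * end up programmed with identical divisors and the
 * WARN_ON(reduced_clock->... != clock->...) checks above are trivially
 * satisfied. A separate reduced clock would only be needed for LVDS
 * downclocking, which these paths do not set up.
 */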

static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	return intel_reserve_shared_dplls(state, crtc, NULL);
}

static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	u32 dpll;

	dpll = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;

	if (crtc->pipe != PIPE_A)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;

	return dpll;
}

void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
{
	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;

	hw_state->dpll = vlv_dpll(crtc_state);
	hw_state->dpll_md = i965_dpll_md(crtc_state);
}

static u32 chv_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	u32 dpll;

	dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;

	if (crtc->pipe != PIPE_A)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		dpll |= DPLL_VCO_ENABLE;

	return dpll;
}

void chv_compute_dpll(struct intel_crtc_state *crtc_state)
{
	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;

	hw_state->dpll = chv_dpll(crtc_state);
	hw_state->dpll_md = i965_dpll_md(crtc_state);
}

static int chv_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit = &intel_limits_chv;
	int refclk = 100000;

	if (!crtc_state->clock_set &&
	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	chv_calc_dpll_params(refclk, &crtc_state->dpll);

	chv_compute_dpll(crtc_state);

	/* FIXME this is a mess */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		return 0;

	crtc_state->port_clock = crtc_state->dpll.dot;
	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit = &intel_limits_vlv;
	int refclk = 100000;

	if (!crtc_state->clock_set &&
	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	vlv_calc_dpll_params(refclk, &crtc_state->dpll);

	vlv_compute_dpll(crtc_state);

	/* FIXME this is a mess */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		return 0;

	crtc_state->port_clock = crtc_state->dpll.dot;
	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit;
	int refclk = 96000;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(display)) {
			refclk = dev_priv->display.vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		if (intel_is_dual_link_lvds(dev_priv))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else {
		/* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);

	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
			  &crtc_state->dpll);

	crtc_state->port_clock = crtc_state->dpll.dot;
	/* FIXME this is a mess */
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit;
	int refclk = 96000;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(display)) {
			refclk = dev_priv->display.vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		limit = &pnv_limits_lvds;
	} else {
		limit = &pnv_limits_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	pnv_calc_dpll_params(refclk, &crtc_state->dpll);

	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
			  &crtc_state->dpll);

	crtc_state->port_clock = crtc_state->dpll.dot;
	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit;
	int refclk = 96000;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(display)) {
			refclk = dev_priv->display.vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		limit = &intel_limits_i9xx_lvds;
	} else {
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);

	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
			  &crtc_state->dpll);

	crtc_state->port_clock = crtc_state->dpll.dot;
	/* FIXME this is a mess */
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit;
	int refclk = 48000;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(display)) {
			refclk = dev_priv->display.vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		limit = &intel_limits_i8xx_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
		limit = &intel_limits_i8xx_dvo;
	} else {
		limit = &intel_limits_i8xx_dac;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);

	i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
			  &crtc_state->dpll);

	crtc_state->port_clock = crtc_state->dpll.dot;
	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static const struct intel_dpll_funcs mtl_dpll_funcs = {
	.crtc_compute_clock = mtl_crtc_compute_clock,
};

static const struct intel_dpll_funcs dg2_dpll_funcs = {
	.crtc_compute_clock = dg2_crtc_compute_clock,
};

static const struct intel_dpll_funcs hsw_dpll_funcs = {
	.crtc_compute_clock = hsw_crtc_compute_clock,
	.crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
};

static const struct intel_dpll_funcs ilk_dpll_funcs = {
	.crtc_compute_clock = ilk_crtc_compute_clock,
	.crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
};

static const struct intel_dpll_funcs chv_dpll_funcs = {
	.crtc_compute_clock = chv_crtc_compute_clock,
};

static const struct intel_dpll_funcs vlv_dpll_funcs = {
	.crtc_compute_clock = vlv_crtc_compute_clock,
};

static const struct intel_dpll_funcs g4x_dpll_funcs = {
	.crtc_compute_clock = g4x_crtc_compute_clock,
};

static const struct intel_dpll_funcs pnv_dpll_funcs = {
	.crtc_compute_clock = pnv_crtc_compute_clock,
};

static const struct intel_dpll_funcs i9xx_dpll_funcs = {
	.crtc_compute_clock = i9xx_crtc_compute_clock,
};

static const struct intel_dpll_funcs i8xx_dpll_funcs = {
	.crtc_compute_clock = i8xx_crtc_compute_clock,
};

int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->hw.enable)
		return 0;

	ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
	if (ret) {
		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
			    crtc->base.base.id, crtc->base.name);
		return ret;
	}

	return 0;
}

int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
	drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);

	if (!crtc_state->hw.enable || crtc_state->shared_dpll)
		return 0;

	if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
		return 0;

	ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
	if (ret) {
		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
			    crtc->base.base.id, crtc->base.name);
		return ret;
	}

	return 0;
}

void
intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 14)
		dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
	else if (IS_DG2(dev_priv))
		dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
	else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
		dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
	else if (HAS_PCH_SPLIT(dev_priv))
		dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.funcs.dpll = &chv_dpll_funcs;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
	else if (IS_G4X(dev_priv))
		dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
	else if (IS_PINEVIEW(dev_priv))
		dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
	else if (DISPLAY_VER(dev_priv) != 2)
		dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
	else
		dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;
	int i;

	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_pps_unlocked(display, pipe);

	intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
	intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers.
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;
	int i;

	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_pps_unlocked(display, pipe);

	intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
	intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(dev_priv, pipe),
		       hw_state->dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
	udelay(150);

	if (DISPLAY_VER(dev_priv) >= 4) {
		intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
			       hw_state->dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
		intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
		udelay(150); /* wait for warmup */
	}
}

static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
				 enum dpio_phy phy, enum dpio_channel ch)
{
	u32 tmp;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
	tmp &= 0xffffff00;
	tmp |= 0x00000030;
	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);

	tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
	tmp &= 0x00ffffff;
	tmp |= 0x8c000000;
	vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);

	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
	tmp &= 0xffffff00;
	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);

	tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
	tmp &= 0x00ffffff;
	tmp |= 0xb0000000;
	vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
}
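
/*
 * VLV PLLs are programmed through the sideband (DPIO) interface rather
 * than plain MMIO: the M1/M2/P1/P2/N dividers land in VLV_PLL_DW3, the
 * LPF coefficients are picked based on link rate / output type, and the
 * reference source is switched between SSC and bend clock depending on
 * whether the output is DP or HDMI/VGA. The magic constants below
 * presumably come from the "eDP HDMI DPIO driver vbios notes" document
 * referenced in the code.
 */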
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct dpll *clock = &crtc_state->dpll;
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	enum pipe pipe = crtc->pipe;
	u32 tmp, coreclk;

	vlv_dpio_get(dev_priv);

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, phy, ch);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW16(ch));
	tmp &= 0x00ffffff;
	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW16(ch), tmp);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	tmp = DPIO_M1_DIV(clock->m1) |
		DPIO_M2_DIV(clock->m2) |
		DPIO_P1_DIV(clock->p1) |
		DPIO_P2_DIV(clock->p2) |
		DPIO_N_DIV(clock->n) |
		DPIO_K_DIV(1);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);

	tmp |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);

	/* Set HBR and RBR LPF coefficients */
	if (crtc_state->port_clock == 162000 ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
			       0x009f0003);
	else
		vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
			       0x00d0000f);

	if (intel_crtc_has_dp_encoder(crtc_state)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
				       0x0df40000);
		else
			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
				       0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
				       0x0df70000);
		else
			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
				       0x0df40000);
	}

	coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(ch));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(crtc_state))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(ch), coreclk);

	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW19(ch), 0x87871000);

	vlv_dpio_put(dev_priv);
}

static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}

void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;

	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_pps_unlocked(display, pipe);

	/* Enable Refclk */
	intel_de_write(dev_priv, DPLL(dev_priv, pipe),
		       hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	if (hw_state->dpll & DPLL_VCO_ENABLE) {
		vlv_prepare_pll(crtc_state);
		_vlv_enable_pll(crtc_state);
	}

	intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe), hw_state->dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
}
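
/*
 * CHV uses a fractional-N style PLL: the m2 value computed by
 * chv_compute_dpll() carries a 22 bit fractional part, so the integer
 * portion is m2 >> 22 and the fraction is m2 & 0x3fffff. The fractional
 * divider is only enabled when the fraction is non-zero (integer-only
 * configurations instead select the coarse lock detect threshold), and
 * the loop filter coefficients are chosen from the target VCO frequency.
 */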
static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct dpll *clock = &crtc_state->dpll;
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	u32 tmp, loopfilter, tribuf_calcntr;
	u32 m2_frac;

	m2_frac = clock->m2 & 0x3fffff;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(ch),
		       DPIO_CHV_S1_DIV(5) |
		       DPIO_CHV_P1_DIV(clock->p1) |
		       DPIO_CHV_P2_DIV(clock->p2) |
		       DPIO_CHV_K_DIV(1));

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(ch),
		       DPIO_CHV_M2_DIV(clock->m2 >> 22));

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(ch),
		       DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
		       DPIO_CHV_N_DIV(1));

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(ch),
		       DPIO_CHV_M2_FRAC_DIV(m2_frac));

	/* M2 fraction division enable */
	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
	tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
	if (m2_frac)
		tmp |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(ch), tmp);

	/* Program digital lock detect threshold */
	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(ch));
	tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
		 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
	if (!m2_frac)
		tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(ch), tmp);

	/* Loop filter */
	if (clock->vco == 5400000) {
		loopfilter = DPIO_CHV_PROP_COEFF(0x3) |
			DPIO_CHV_INT_COEFF(0x8) |
			DPIO_CHV_GAIN_CTRL(0x1);
		tribuf_calcntr = 0x9;
	} else if (clock->vco <= 6200000) {
		loopfilter = DPIO_CHV_PROP_COEFF(0x5) |
			DPIO_CHV_INT_COEFF(0xB) |
			DPIO_CHV_GAIN_CTRL(0x3);
		tribuf_calcntr = 0x9;
	} else if (clock->vco <= 6480000) {
		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
			DPIO_CHV_INT_COEFF(0x9) |
			DPIO_CHV_GAIN_CTRL(0x3);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
			DPIO_CHV_INT_COEFF(0x9) |
			DPIO_CHV_GAIN_CTRL(0x3);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(ch), loopfilter);

	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(ch));
	tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(ch), tmp);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch),
		       vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)) |
		       DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
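
/*
 * The CHV enable sequence first turns the 10 bit dclkp clock back on via
 * CHV_CMN_DW14, waits the required >100ns (rounded up to 1us here), and
 * only then writes the full DPLL value (including DPLL_VCO_ENABLE) and
 * polls for DPLL_LOCK_VLV.
 */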
static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}

void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;

	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_pps_unlocked(display, pipe);

	/* Enable Refclk and SSC */
	intel_de_write(dev_priv, DPLL(dev_priv, pipe),
		       hw_state->dpll & ~DPLL_VCO_ENABLE);

	if (hw_state->dpll & DPLL_VCO_ENABLE) {
		chv_prepare_pll(crtc_state);
		_chv_enable_pll(crtc_state);
	}

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(dev_priv, PIPE_B),
			       hw_state->dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(&dev_priv->drm,
			    (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
			       hw_state->dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
	}
}
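
/*
 * The force on/off helpers below bring up or tear down just the DPLL,
 * outside of a full modeset: vlv_force_pll_on() builds a throwaway
 * crtc_state around the caller-supplied dividers so the normal
 * compute/enable paths above can be reused, and vlv_force_pll_off()
 * simply picks the right disable helper for the platform.
 */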
/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
		     const struct dpll *dpll)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	struct intel_crtc_state *crtc_state;

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state)
		return -ENOMEM;

	crtc_state->cpu_transcoder = (enum transcoder)pipe;
	crtc_state->pixel_multiplier = 1;
	crtc_state->dpll = *dpll;
	crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_compute_dpll(crtc_state);
		chv_enable_pll(crtc_state);
	} else {
		vlv_compute_dpll(crtc_state);
		vlv_enable_pll(crtc_state);
	}

	intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);

	return 0;
}

void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(dev_priv, pipe), val);
	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
}

void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(dev_priv, pipe), val);
	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), val);

	vlv_dpio_put(dev_priv);
}

void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, DPLL(dev_priv, pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
}

/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. To be used in cases where the PLL was
 * previously force enabled with vlv_force_pll_on() even though @pipe
 * itself was not going to be enabled.
 */
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	if (IS_CHERRYVIEW(dev_priv))
		chv_disable_pll(dev_priv, pipe);
	else
		vlv_disable_pll(dev_priv, pipe);
}

/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	struct intel_display *display = &dev_priv->display;
	bool cur_state;

	cur_state = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "PLL state assertion failure (expected %s, current %s)\n",
				 str_on_off(state), str_on_off(cur_state));
}

void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_pll(i915, pipe, true);
}

void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_pll(i915, pipe, false);
}