// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/kernel.h>
#include <linux/string_helpers.h>

#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_snps_phy.h"
#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"

struct intel_dpll_funcs {
	int (*crtc_compute_clock)(struct intel_atomic_state *state,
				  struct intel_crtc *crtc);
	int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
				    struct intel_crtc *crtc);
};

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10, .p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
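
/*
 * For illustration only (not from bspec): with this encoding a stored
 * n of 1 corresponds to an actual N divider of 3; i9xx_calc_dpll_params()
 * below therefore uses (n + 2), (m1 + 2) and (m2 + 2) when converting
 * these table entries back into clock rates.
 */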

static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are based on the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are based on the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000, .max = 540000 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	.dot = { .min = 25000, .max = 594000 },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;

	clock->vco = clock->n == 0 ? 0 :
		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = clock->p == 0 ? 0 :
		DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;

	clock->vco = clock->n + 2 == 0 ? 0 :
		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = clock->p == 0 ? 0 :
		DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2 * 5;

	clock->vco = clock->n == 0 ? 0 :
		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = clock->p == 0 ? 0 :
		DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2 * 5;

	clock->vco = clock->n == 0 ? 0 :
		DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), clock->n << 22);
	clock->dot = clock->p == 0 ? 0 :
		DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
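
/*
 * Worked example for the i9xx encoding (illustrative numbers only, not
 * taken from any platform's limit tables): refclk = 96000 kHz, n = 4,
 * m1 = 10, m2 = 5, p1 = 2, p2 = 10 gives
 *
 *   m   = 5 * (m1 + 2) + (m2 + 2) = 67
 *   p   = p1 * p2                 = 20
 *   vco = refclk * m / (n + 2)    = 96000 * 67 / 6 = 1072000 kHz
 *   dot = vco / p                 = 53600 kHz
 *
 * i.e. roughly a 53.6 MHz dot clock out of a 1.07 GHz VCO.
 */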

static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;

	if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return i915->display.vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(i915))
		return 120000;
	else if (DISPLAY_VER(i915) != 2)
		return 96000;
	else
		return 48000;
}

void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
			    struct intel_dpll_hw_state *dpll_hw_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;

	if (DISPLAY_VER(dev_priv) >= 4) {
		u32 tmp;

		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv,
					    DPLL_MD(dev_priv, crtc->pipe));

		hw_state->dpll_md = tmp;
	}

	hw_state->dpll = intel_de_read(dev_priv, DPLL(dev_priv, crtc->pipe));

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe));
		hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		hw_state->dpll &= ~(DPLL_LOCK_VLV |
				    DPLL_PORTC_READY_MASK |
				    DPLL_PORTB_READY_MASK);
	}
}

/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	u32 dpll = hw_state->dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(crtc_state);

	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = hw_state->fp0;
	else
		fp = hw_state->fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (DISPLAY_VER(dev_priv) != 2) {
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		enum pipe lvds_pipe;

		if (IS_I85X(dev_priv) &&
		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(dev_priv, LVDS);

			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	crtc_state->port_clock = port_clock;
}

void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	int refclk = 100000;
	struct dpll clock;
	u32 tmp;

	/* In case of DSI, DPLL will not be used */
	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(ch));
	vlv_dpio_put(dev_priv);

	clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
	clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
	clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp);
	clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp);
	clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp);

	crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock);
}

void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(ch));
	pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(ch));
	pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(ch));
	pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(ch));
	pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
	vlv_dpio_put(dev_priv);

	clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2);
	clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1);
	clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13);
	clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13);

	crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock);
}

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		return false;
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		return false;
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		return false;
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		return false;

	if (!IS_PINEVIEW(dev_priv) &&
	    !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv))
		if (clock->m1 <= clock->m2)
			return false;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			return false;
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			return false;
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		return false;
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		return false;

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}
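
/*
 * For illustration: with the g4x HDMI limits above (dot_limit = 165000,
 * p2_slow = 10, p2_fast = 5) a 148500 kHz target stays below the dot
 * limit and selects p2 = 10, while a 268500 kHz target selects p2 = 5;
 * LVDS instead follows the current single/dual channel state.
 */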

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk,
		    const struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (drm_WARN_ON_ONCE(dev, !target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
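
/*
 * For illustration: a candidate dot clock of 270054 kHz against a
 * 270000 kHz target works out to 1000000 * 54 / 270000 = 200 ppm of
 * error; being >= 100 ppm it gets no bigger-P preference, and it only
 * replaces the previous best if that best was worse than 210 ppm
 * (the "+ 10" hysteresis above).
 */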

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2 * 5;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 is always
	 * set to 2. If we ever need to support a 200 MHz refclk, we will
	 * have to revisit this because n may no longer be 1 then.
	 */
	clock.n = 1;
	clock.m1 = 2;

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2 * 5;

			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}
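
/*
 * For illustration: on CHV m2 carries 22 fractional bits, so e.g.
 * m2 = 26 << 22 with n = 1, m1 = 2 and refclk = 100000 kHz yields
 * vco = 100000 * 2 * (26 << 22) / (1 << 22) = 5200000 kHz; the << 22
 * scaling cancels against the (n << 22) divisor in chv_calc_dpll_params().
 */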

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	const struct intel_limit *limit = &intel_limits_bxt;
	int refclk = 100000;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}

u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}

static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}

static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}

static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
		     const struct dpll *clock,
		     const struct dpll *reduced_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;

	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_G4X(dev_priv)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	} else if (IS_PINEVIEW(dev_priv)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		WARN_ON(reduced_clock->p1 != clock->p1);
	} else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		WARN_ON(reduced_clock->p1 != clock->p1);
	}

	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	WARN_ON(reduced_clock->p2 != clock->p2);

	if (DISPLAY_VER(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll;
}

static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
			      const struct dpll *clock,
			      const struct dpll *reduced_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;

	if (IS_PINEVIEW(dev_priv)) {
		hw_state->fp0 = pnv_dpll_compute_fp(clock);
		hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		hw_state->fp0 = i9xx_dpll_compute_fp(clock);
		hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
	}

	hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);

	if (DISPLAY_VER(dev_priv) >= 4)
		hw_state->dpll_md = i965_dpll_md(crtc_state);
}

static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
		     const struct dpll *clock,
		     const struct dpll *reduced_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;

	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}
	WARN_ON(reduced_clock->p1 != clock->p1);
	WARN_ON(reduced_clock->p2 != clock->p2);

	/*
	 * Bspec:
	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to “1” in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity we keep both bits always enabled in both DPLLs.
	 * The spec says we should disable the DVO 2X clock when not needed,
	 * but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll;
}

static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
			      const struct dpll *clock,
			      const struct dpll *reduced_clock)
{
	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;

	hw_state->fp0 = i9xx_dpll_compute_fp(clock);
	hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);

	hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock);
}

static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder =
		intel_get_crtc_new_encoder(state, crtc_state);
	int ret;

	if (DISPLAY_VER(dev_priv) < 11 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		return 0;

	ret = intel_compute_shared_dplls(state, crtc, encoder);
	if (ret)
		return ret;

	/* FIXME this is a mess */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		return 0;

	/* CRT dotclock is determined via other means */
	if (!crtc_state->has_pch_encoder)
		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder =
		intel_get_crtc_new_encoder(state, crtc_state);

	if (DISPLAY_VER(dev_priv) < 11 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		return 0;

	return intel_reserve_shared_dplls(state, crtc, encoder);
}

static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder =
		intel_get_crtc_new_encoder(state, crtc_state);
	int ret;

	ret = intel_mpllb_calc_state(crtc_state, encoder);
	if (ret)
		return ret;

	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder =
		intel_get_crtc_new_encoder(state, crtc_state);
	int ret;

	ret = intel_cx0pll_calc_state(crtc_state, encoder);
	if (ret)
		return ret;

	/* TODO: Do the readback via intel_compute_shared_dplls() */
	crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);

	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    ((intel_panel_use_ssc(i915) && i915->display.vbt.lvds_ssc_freq == 100000) ||
	     (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915))))
		return 25;

	if (crtc_state->sdvo_tv_clock)
		return 20;

	return 21;
}

static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
{
	return dpll->m < factor * dpll->n;
}

static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor)
{
	u32 fp;

	fp = i9xx_dpll_compute_fp(clock);
	if (ilk_needs_fb_cb_tune(clock, factor))
		fp |= FP_CB_TUNE;

	return fp;
}

static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
		    const struct dpll *clock,
		    const struct dpll *reduced_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;

	dpll = DPLL_VCO_ENABLE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	WARN_ON(reduced_clock->p2 != clock->p2);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll;
}

static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
			     const struct dpll *clock,
			     const struct dpll *reduced_clock)
{
	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	int factor = ilk_fb_cb_factor(crtc_state);

	hw_state->fp0 = ilk_dpll_compute_fp(clock, factor);
	hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor);

	hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock);
}

static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit;
	int refclk = 120000;
	int ret;

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    dev_priv->display.vbt.lvds_ssc_freq);
			refclk = dev_priv->display.vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &ilk_limits_dual_lvds_100m;
			else
				limit = &ilk_limits_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &ilk_limits_single_lvds_100m;
			else
				limit = &ilk_limits_single_lvds;
		}
	} else {
		limit = &ilk_limits_dac;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);

	ilk_compute_dpll(crtc_state, &crtc_state->dpll,
			 &crtc_state->dpll);

	ret = intel_compute_shared_dplls(state, crtc, NULL);
	if (ret)
		return ret;

	crtc_state->port_clock = crtc_state->dpll.dot;
	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return ret;
}

static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	return intel_reserve_shared_dplls(state, crtc, NULL);
}

static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	u32 dpll;

	dpll = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;

	if (crtc->pipe != PIPE_A)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;

	return dpll;
}

void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
{
	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;

	hw_state->dpll = vlv_dpll(crtc_state);
	hw_state->dpll_md = i965_dpll_md(crtc_state);
}

static u32 chv_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	u32 dpll;

	dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;

	if (crtc->pipe != PIPE_A)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		dpll |= DPLL_VCO_ENABLE;

	return dpll;
}

void chv_compute_dpll(struct intel_crtc_state *crtc_state)
{
	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;

	hw_state->dpll = chv_dpll(crtc_state);
	hw_state->dpll_md = i965_dpll_md(crtc_state);
}

static int chv_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit = &intel_limits_chv;
	int refclk = 100000;

	if (!crtc_state->clock_set &&
	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	chv_calc_dpll_params(refclk, &crtc_state->dpll);

	chv_compute_dpll(crtc_state);

	/* FIXME this is a mess */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		return 0;

	crtc_state->port_clock = crtc_state->dpll.dot;
	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit = &intel_limits_vlv;
	int refclk = 100000;

	if (!crtc_state->clock_set &&
	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	vlv_calc_dpll_params(refclk, &crtc_state->dpll);

	vlv_compute_dpll(crtc_state);

	/* FIXME this is a mess */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		return 0;

	crtc_state->port_clock = crtc_state->dpll.dot;
	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit;
	int refclk = 96000;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->display.vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		if (intel_is_dual_link_lvds(dev_priv))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else {
		/* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);

	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
			  &crtc_state->dpll);

	crtc_state->port_clock = crtc_state->dpll.dot;
	/* FIXME this is a mess */
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit;
	int refclk = 96000;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->display.vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		limit = &pnv_limits_lvds;
	} else {
		limit = &pnv_limits_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	pnv_calc_dpll_params(refclk, &crtc_state->dpll);

	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
			  &crtc_state->dpll);

	crtc_state->port_clock = crtc_state->dpll.dot;
	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit;
	int refclk = 96000;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->display.vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		limit = &intel_limits_i9xx_lvds;
	} else {
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);

	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
			  &crtc_state->dpll);

	crtc_state->port_clock = crtc_state->dpll.dot;
	/* FIXME this is a mess */
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit;
	int refclk = 48000;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->display.vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		limit = &intel_limits_i8xx_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
		limit = &intel_limits_i8xx_dvo;
	} else {
		limit = &intel_limits_i8xx_dac;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);

	i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
			  &crtc_state->dpll);

	crtc_state->port_clock = crtc_state->dpll.dot;
	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}

static const struct intel_dpll_funcs mtl_dpll_funcs = {
	.crtc_compute_clock = mtl_crtc_compute_clock,
};

static const struct intel_dpll_funcs dg2_dpll_funcs = {
	.crtc_compute_clock = dg2_crtc_compute_clock,
};

static const struct intel_dpll_funcs hsw_dpll_funcs = {
	.crtc_compute_clock = hsw_crtc_compute_clock,
	.crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
};

static const struct intel_dpll_funcs ilk_dpll_funcs = {
	.crtc_compute_clock = ilk_crtc_compute_clock,
	.crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
};

static const struct intel_dpll_funcs chv_dpll_funcs = {
	.crtc_compute_clock = chv_crtc_compute_clock,
};

static const struct intel_dpll_funcs vlv_dpll_funcs = {
	.crtc_compute_clock = vlv_crtc_compute_clock,
};

static const struct intel_dpll_funcs g4x_dpll_funcs = {
	.crtc_compute_clock = g4x_crtc_compute_clock,
};

static const struct intel_dpll_funcs pnv_dpll_funcs = {
	.crtc_compute_clock = pnv_crtc_compute_clock,
};

static const struct intel_dpll_funcs i9xx_dpll_funcs = {
	.crtc_compute_clock = i9xx_crtc_compute_clock,
};

static const struct intel_dpll_funcs i8xx_dpll_funcs = {
	.crtc_compute_clock = i8xx_crtc_compute_clock,
};

int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->hw.enable)
		return 0;

	ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
	if (ret) {
		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
			    crtc->base.base.id, crtc->base.name);
		return ret;
	}

	return 0;
}

int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
	drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);

	if (!crtc_state->hw.enable || crtc_state->shared_dpll)
		return 0;

	if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
		return 0;

	ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
	if (ret) {
		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
			    crtc->base.base.id, crtc->base.name);
		return ret;
	}

	return 0;
}

void
intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 14)
		dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
	else if (IS_DG2(dev_priv))
		dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
	else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
		dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
	else if (HAS_PCH_SPLIT(dev_priv))
		dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.funcs.dpll = &chv_dpll_funcs;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
	else if (IS_G4X(dev_priv))
		dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
	else if (IS_PINEVIEW(dev_priv))
		dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
	else if (DISPLAY_VER(dev_priv) != 2)
		dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
	else
		dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;
	int i;

	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_pps_unlocked(display, pipe);

	intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
	intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(dev_priv, pipe),
		       hw_state->dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);

	/* Wait for the clocks to stabilize. */
	if (DISPLAY_VER(dev_priv) >= 4) {
		intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
			       hw_state->dpll_md);
	} else {
		/*
		 * The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
		intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
		udelay(150); /* wait for warmup */
	}
}

static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
				 enum dpio_phy phy, enum dpio_channel ch)
{
	u32 tmp;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
	tmp &= 0xffffff00;
	tmp |= 0x00000030;
	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);

	tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
	tmp &= 0x00ffffff;
	tmp |= 0x8c000000;
	vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);

	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
	tmp &= 0xffffff00;
	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);

	tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
	tmp &= 0x00ffffff;
	tmp |= 0xb0000000;
	vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
}

static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct dpll *clock = &crtc_state->dpll;
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	enum pipe pipe = crtc->pipe;
	u32 tmp, coreclk;

	vlv_dpio_get(dev_priv);

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, phy, ch);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW16(ch));
	tmp &= 0x00ffffff;
	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW16(ch), tmp);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	tmp = DPIO_M1_DIV(clock->m1) |
		DPIO_M2_DIV(clock->m2) |
		DPIO_P1_DIV(clock->p1) |
		DPIO_P2_DIV(clock->p2) |
		DPIO_N_DIV(clock->n) |
		DPIO_K_DIV(1);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);

	tmp |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);

	/* Set HBR and RBR LPF coefficients */
	if (crtc_state->port_clock == 162000 ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
			       0x009f0003);
	else
		vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
			       0x00d0000f);

	if (intel_crtc_has_dp_encoder(crtc_state)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
				       0x0df40000);
		else
			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
				       0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
				       0x0df70000);
		else
			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
				       0x0df40000);
	}

	coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(ch));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(crtc_state))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(ch), coreclk);

	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW19(ch), 0x87871000);

	vlv_dpio_put(dev_priv);
}

static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}

void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;

	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_pps_unlocked(display, pipe);

	/* Enable Refclk */
	intel_de_write(dev_priv, DPLL(dev_priv, pipe),
		       hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	if (hw_state->dpll & DPLL_VCO_ENABLE) {
		vlv_prepare_pll(crtc_state);
		_vlv_enable_pll(crtc_state);
	}

	intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe), hw_state->dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
}

static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct dpll *clock = &crtc_state->dpll;
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	u32 tmp, loopfilter, tribuf_calcntr;
	u32 m2_frac;

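	/* clock->m2 is stored as a fixed point value with 22 fractional bits */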
	m2_frac = clock->m2 & 0x3fffff;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(ch),
		       DPIO_CHV_S1_DIV(5) |
		       DPIO_CHV_P1_DIV(clock->p1) |
		       DPIO_CHV_P2_DIV(clock->p2) |
		       DPIO_CHV_K_DIV(1));

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(ch),
		       DPIO_CHV_M2_DIV(clock->m2 >> 22));

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(ch),
		       DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
		       DPIO_CHV_N_DIV(1));

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(ch),
		       DPIO_CHV_M2_FRAC_DIV(m2_frac));

	/* M2 fraction division enable */
	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
	tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
	if (m2_frac)
		tmp |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(ch), tmp);

	/* Program digital lock detect threshold */
	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(ch));
	tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
		 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
	if (!m2_frac)
		tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(ch), tmp);

	/* Loop filter */
	if (clock->vco == 5400000) {
		loopfilter = DPIO_CHV_PROP_COEFF(0x3) |
			DPIO_CHV_INT_COEFF(0x8) |
			DPIO_CHV_GAIN_CTRL(0x1);
		tribuf_calcntr = 0x9;
	} else if (clock->vco <= 6200000) {
		loopfilter = DPIO_CHV_PROP_COEFF(0x5) |
			DPIO_CHV_INT_COEFF(0xB) |
			DPIO_CHV_GAIN_CTRL(0x3);
		tribuf_calcntr = 0x9;
	} else if (clock->vco <= 6480000) {
		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
			DPIO_CHV_INT_COEFF(0x9) |
			DPIO_CHV_GAIN_CTRL(0x3);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
			DPIO_CHV_INT_COEFF(0x9) |
			DPIO_CHV_GAIN_CTRL(0x3);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(ch), loopfilter);

	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(ch));
	tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(ch), tmp);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch),
		       vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)) |
		       DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}

static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
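	 * The 1 usec delay below comfortably covers that requirement.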
2145 */ 2146 udelay(1); 2147 2148 /* Enable PLL */ 2149 intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll); 2150 2151 /* Check PLL is locked */ 2152 if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1)) 2153 drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe); 2154 } 2155 2156 void chv_enable_pll(const struct intel_crtc_state *crtc_state) 2157 { 2158 struct intel_display *display = to_intel_display(crtc_state); 2159 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2160 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2161 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; 2162 enum pipe pipe = crtc->pipe; 2163 2164 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); 2165 2166 /* PLL is protected by panel, make sure we can write it */ 2167 assert_pps_unlocked(display, pipe); 2168 2169 /* Enable Refclk and SSC */ 2170 intel_de_write(dev_priv, DPLL(dev_priv, pipe), 2171 hw_state->dpll & ~DPLL_VCO_ENABLE); 2172 2173 if (hw_state->dpll & DPLL_VCO_ENABLE) { 2174 chv_prepare_pll(crtc_state); 2175 _chv_enable_pll(crtc_state); 2176 } 2177 2178 if (pipe != PIPE_A) { 2179 /* 2180 * WaPixelRepeatModeFixForC0:chv 2181 * 2182 * DPLLCMD is AWOL. Use chicken bits to propagate 2183 * the value from DPLLBMD to either pipe B or C. 2184 */ 2185 intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe)); 2186 intel_de_write(dev_priv, DPLL_MD(dev_priv, PIPE_B), 2187 hw_state->dpll_md); 2188 intel_de_write(dev_priv, CBR4_VLV, 0); 2189 dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md; 2190 2191 /* 2192 * DPLLB VGA mode also seems to cause problems. 2193 * We should always have it disabled. 2194 */ 2195 drm_WARN_ON(&dev_priv->drm, 2196 (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) & 2197 DPLL_VGA_MODE_DIS) == 0); 2198 } else { 2199 intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe), 2200 hw_state->dpll_md); 2201 intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe)); 2202 } 2203 } 2204 2205 /** 2206 * vlv_force_pll_on - forcibly enable just the PLL 2207 * @dev_priv: i915 private structure 2208 * @pipe: pipe PLL to enable 2209 * @dpll: PLL configuration 2210 * 2211 * Enable the PLL for @pipe using the supplied @dpll config. To be used 2212 * in cases where we need the PLL enabled even when @pipe is not going to 2213 * be enabled. 
2214 */ 2215 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, 2216 const struct dpll *dpll) 2217 { 2218 struct intel_display *display = &dev_priv->display; 2219 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 2220 struct intel_crtc_state *crtc_state; 2221 2222 crtc_state = intel_crtc_state_alloc(crtc); 2223 if (!crtc_state) 2224 return -ENOMEM; 2225 2226 crtc_state->cpu_transcoder = (enum transcoder)pipe; 2227 crtc_state->pixel_multiplier = 1; 2228 crtc_state->dpll = *dpll; 2229 crtc_state->output_types = BIT(INTEL_OUTPUT_EDP); 2230 2231 if (IS_CHERRYVIEW(dev_priv)) { 2232 chv_compute_dpll(crtc_state); 2233 chv_enable_pll(crtc_state); 2234 } else { 2235 vlv_compute_dpll(crtc_state); 2236 vlv_enable_pll(crtc_state); 2237 } 2238 2239 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); 2240 2241 return 0; 2242 } 2243 2244 void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 2245 { 2246 u32 val; 2247 2248 /* Make sure the pipe isn't still relying on us */ 2249 assert_transcoder_disabled(dev_priv, (enum transcoder)pipe); 2250 2251 val = DPLL_INTEGRATED_REF_CLK_VLV | 2252 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 2253 if (pipe != PIPE_A) 2254 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 2255 2256 intel_de_write(dev_priv, DPLL(dev_priv, pipe), val); 2257 intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe)); 2258 } 2259 2260 void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 2261 { 2262 enum dpio_channel ch = vlv_pipe_to_channel(pipe); 2263 enum dpio_phy phy = vlv_pipe_to_phy(pipe); 2264 u32 val; 2265 2266 /* Make sure the pipe isn't still relying on us */ 2267 assert_transcoder_disabled(dev_priv, (enum transcoder)pipe); 2268 2269 val = DPLL_SSC_REF_CLK_CHV | 2270 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 2271 if (pipe != PIPE_A) 2272 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 2273 2274 intel_de_write(dev_priv, DPLL(dev_priv, pipe), val); 2275 intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe)); 2276 2277 vlv_dpio_get(dev_priv); 2278 2279 /* Disable 10bit clock to display controller */ 2280 val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)); 2281 val &= ~DPIO_DCLKP_EN; 2282 vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), val); 2283 2284 vlv_dpio_put(dev_priv); 2285 } 2286 2287 void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) 2288 { 2289 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2290 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2291 enum pipe pipe = crtc->pipe; 2292 2293 /* Don't disable pipe or pipe PLLs if needed */ 2294 if (IS_I830(dev_priv)) 2295 return; 2296 2297 /* Make sure the pipe isn't still relying on us */ 2298 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); 2299 2300 intel_de_write(dev_priv, DPLL(dev_priv, pipe), DPLL_VGA_MODE_DIS); 2301 intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe)); 2302 } 2303 2304 2305 /** 2306 * vlv_force_pll_off - forcibly disable just the PLL 2307 * @dev_priv: i915 private structure 2308 * @pipe: pipe PLL to disable 2309 * 2310 * Disable the PLL for @pipe. To be used in cases where we need 2311 * the PLL enabled even when @pipe is not going to be enabled. 
2312 */ 2313 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) 2314 { 2315 if (IS_CHERRYVIEW(dev_priv)) 2316 chv_disable_pll(dev_priv, pipe); 2317 else 2318 vlv_disable_pll(dev_priv, pipe); 2319 } 2320 2321 /* Only for pre-ILK configs */ 2322 static void assert_pll(struct drm_i915_private *dev_priv, 2323 enum pipe pipe, bool state) 2324 { 2325 bool cur_state; 2326 2327 cur_state = intel_de_read(dev_priv, DPLL(dev_priv, pipe)) & DPLL_VCO_ENABLE; 2328 I915_STATE_WARN(dev_priv, cur_state != state, 2329 "PLL state assertion failure (expected %s, current %s)\n", 2330 str_on_off(state), str_on_off(cur_state)); 2331 } 2332 2333 void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe) 2334 { 2335 assert_pll(i915, pipe, true); 2336 } 2337 2338 void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe) 2339 { 2340 assert_pll(i915, pipe, false); 2341 } 2342