// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"

static void lpt_fdi_reset_mphy(struct intel_display *display)
{
	intel_de_rmw(display, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);

	if (wait_for_us(intel_de_read(display, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(display->drm, "FDI mPHY reset assert timeout\n");

	intel_de_rmw(display, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);

	if (wait_for_us((intel_de_read(display, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(display->drm, "FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw */
static void lpt_fdi_program_mphy(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 tmp;

	lpt_fdi_reset_mphy(display);

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

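/*
 * Gate the PCH pixel clock and stop the iCLKIP SSC modulator via the
 * sideband interface (SBI_SSCCTL6).
 */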
void lpt_disable_iclkip(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 temp;

	intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_GATE);

	intel_sbi_lock(dev_priv);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	intel_sbi_unlock(dev_priv);
}

struct iclkip_params {
	u32 iclk_virtual_root_freq;
	u32 iclk_pi_range;
	u32 divsel, phaseinc, auxdiv, phasedir, desired_divisor;
};

static void iclkip_params_init(struct iclkip_params *p)
{
	memset(p, 0, sizeof(*p));

	p->iclk_virtual_root_freq = 172800 * 1000;
	p->iclk_pi_range = 64;
}

static int lpt_iclkip_freq(struct iclkip_params *p)
{
	return DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
				 p->desired_divisor << p->auxdiv);
}

static void lpt_compute_iclkip(struct iclkip_params *p, int clock)
{
	iclkip_params_init(p);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (p->auxdiv = 0; p->auxdiv < 2; p->auxdiv++) {
		p->desired_divisor = DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
						       clock << p->auxdiv);
		p->divsel = (p->desired_divisor / p->iclk_pi_range) - 2;
		p->phaseinc = p->desired_divisor % p->iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (p->divsel <= 0x7f)
			break;
	}
}

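/*
 * Worked example for lpt_compute_iclkip() (illustrative): for a
 * 108000 kHz pixel clock with auxdiv == 0, desired_divisor =
 * 172800000 / 108000 = 1600, giving divsel = 1600 / 64 - 2 = 23 and
 * phaseinc = 1600 % 64 = 0, which lpt_iclkip_freq() maps back to
 * exactly 108000 kHz.
 */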
int lpt_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct iclkip_params p;

	lpt_compute_iclkip(&p, crtc_state->hw.adjusted_mode.crtc_clock);

	return lpt_iclkip_freq(&p);
}

/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	struct iclkip_params p;
	u32 temp;

	lpt_disable_iclkip(display);

	lpt_compute_iclkip(&p, clock);
	drm_WARN_ON(display->drm, lpt_iclkip_freq(&p) != clock);

	/* This should not happen with any sane values */
	drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(display->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);

	intel_sbi_lock(dev_priv);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(p.divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(p.phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(p.phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(p.auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	intel_sbi_unlock(dev_priv);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

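/*
 * Read back the current iCLKIP configuration and return the resulting
 * frequency in kHz, or 0 if the pixel clock is gated or the modulator is
 * disabled. The desired_divisor reconstruction is the inverse of
 * lpt_compute_iclkip().
 */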
int lpt_get_iclkip(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct iclkip_params p;
	u32 temp;

	if ((intel_de_read(display, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	iclkip_params_init(&p);

	intel_sbi_lock(dev_priv);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		intel_sbi_unlock(dev_priv);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	p.divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	p.phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	intel_sbi_unlock(dev_priv);

	p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;

	return lpt_iclkip_freq(&p);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct intel_display *display,
				 bool with_spread, bool with_fdi)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 reg, tmp;

	if (drm_WARN(display->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(display->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	intel_sbi_lock(dev_priv);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi)
			lpt_fdi_program_mphy(display);
	}

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	intel_sbi_unlock(dev_priv);
}

/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 reg, tmp;

	intel_sbi_lock(dev_priv);

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	intel_sbi_unlock(dev_priv);
}

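/*
 * BEND_IDX() maps the supported bend steps (-50..+50 in increments of 5)
 * onto indices 0..20 of the sscdivintphase[] table below.
 */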
#define BEND_IDX(steps) ((50 + (steps)) / 5)

static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct intel_display *display, int steps)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 tmp;
	int idx = BEND_IDX(steps);

	if (drm_WARN_ON(display->drm, steps % 5 != 0))
		return;

	if (drm_WARN_ON(display->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	intel_sbi_lock(dev_priv);

	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	intel_sbi_unlock(dev_priv);
}

#undef BEND_IDX

static bool spll_uses_pch_ssc(struct intel_display *display)
{
	u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
	u32 ctl = intel_de_read(display, SPLL_CTL);

	if ((ctl & SPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	if (display->platform.broadwell &&
	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
		return true;

	return false;
}

static bool wrpll_uses_pch_ssc(struct intel_display *display, enum intel_dpll_id id)
{
	u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
	u32 ctl = intel_de_read(display, WRPLL_CTL(id));

	if ((ctl & WRPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
		return true;

	if ((display->platform.broadwell || display->platform.haswell_ult) &&
	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	return false;
}

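/*
 * Keep the PCH SSC reference (CLKOUT_DP) untouched if any PLL still uses
 * it (e.g. as set up by the BIOS); otherwise enable it with spread for
 * FDI when an analog output is present, or disable it entirely.
 */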
static void lpt_init_pch_refclk(struct intel_display *display)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

	for_each_intel_encoder(display->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	display->dpll.pch_ssc_use = 0;

	if (spll_uses_pch_ssc(display)) {
		drm_dbg_kms(display->drm, "SPLL using PCH SSC\n");
		display->dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(display->drm, "WRPLL1 using PCH SSC\n");
		display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(display->drm, "WRPLL2 using PCH SSC\n");
		display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	if (display->dpll.pch_ssc_use)
		return;

	if (has_fdi) {
		lpt_bend_clkout_dp(display, 0);
		lpt_enable_clkout_dp(display, true, true);
	} else {
		lpt_disable_clkout_dp(display);
	}
}

static void ilk_init_pch_refclk(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_encoder *encoder;
	struct intel_shared_dpll *pll;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(display->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = display->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for_each_shared_dpll(display, pll, i) {
		u32 temp;

		temp = intel_de_read(display, PCH_DPLL(pll->info->id));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(display->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to set up the display ref clock before
	 * enabling the DPLLs. This is only under the driver's control
	 * after PCH B stepping; earlier steppings should ignore this
	 * setting.
	 */
	val = intel_de_read(display, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(display) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(display) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}
	} else if (using_ssc_source) {
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(display) && can_ssc) {
			drm_dbg_kms(display->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else {
			val &= ~DREF_SSC1_ENABLE;
		}

		/* Get SSC going before enabling the outputs */
		intel_de_write(display, PCH_DREF_CONTROL, val);
		intel_de_posting_read(display, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(display) && can_ssc) {
				drm_dbg_kms(display->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else {
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
			}
		} else {
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}

		intel_de_write(display, PCH_DREF_CONTROL, val);
		intel_de_posting_read(display, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(display->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(display, PCH_DREF_CONTROL, val);
		intel_de_posting_read(display, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(display->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(display, PCH_DREF_CONTROL, val);
			intel_de_posting_read(display, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	drm_WARN_ON(display->drm, val != final);
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ilk_init_pch_refclk(display);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(display);
}