/* xref: /linux/drivers/gpu/drm/i915/display/intel_pch_refclk.c (revision 7f71507851fc7764b36a3221839607d3a45c2025) */
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"

static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
{
	intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);

	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw */
static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	lpt_fdi_reset_mphy(dev_priv);

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

struct iclkip_params {
	u32 iclk_virtual_root_freq;
	u32 iclk_pi_range;
	u32 divsel, phaseinc, auxdiv, phasedir, desired_divisor;
};

static void iclkip_params_init(struct iclkip_params *p)
{
	memset(p, 0, sizeof(*p));

	p->iclk_virtual_root_freq = 172800 * 1000;
	p->iclk_pi_range = 64;
}

static int lpt_iclkip_freq(struct iclkip_params *p)
{
	return DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
				 p->desired_divisor << p->auxdiv);
}
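
/*
 * Editor's note (not part of the original source): lpt_iclkip_freq() above
 * simply evaluates iclk_virtual_root_freq / (desired_divisor << auxdiv),
 * i.e. it converts a divisor selection back into an output clock in kHz.
 */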

static void lpt_compute_iclkip(struct iclkip_params *p, int clock)
{
	iclkip_params_init(p);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by the other, so we
	 * convert the virtual clock to KHz here for higher
	 * precision.
	 */
	for (p->auxdiv = 0; p->auxdiv < 2; p->auxdiv++) {
		p->desired_divisor = DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
						       clock << p->auxdiv);
		p->divsel = (p->desired_divisor / p->iclk_pi_range) - 2;
		p->phaseinc = p->desired_divisor % p->iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (p->divsel <= 0x7f)
			break;
	}
}
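
/*
 * Worked example (editor's illustration; the 108000 kHz pixel clock is an
 * assumed input, not taken from this file): with clock = 108000 and
 * auxdiv = 0, desired_divisor = DIV_ROUND_CLOSEST(172800000, 108000) = 1600,
 * giving divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0. Since
 * divsel <= 0x7f the loop stops at auxdiv = 0, and lpt_iclkip_freq() maps
 * the result back to 172800000 / 1600 = 108000 kHz exactly.
 */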

int lpt_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct iclkip_params p;

	lpt_compute_iclkip(&p, crtc_state->hw.adjusted_mode.crtc_clock);

	return lpt_iclkip_freq(&p);
}

/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	struct iclkip_params p;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	lpt_compute_iclkip(&p, clock);
	drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock);

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(p.divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(p.phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(p.phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(p.auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	struct iclkip_params p;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	iclkip_params_init(&p);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	p.divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	p.phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;

	return lpt_iclkip_freq(&p);
}
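
/*
 * Editor's note (illustrative, not from the original source): the divisor
 * reconstruction above is the inverse of lpt_compute_iclkip(). Continuing
 * the 108000 kHz example, reading back divsel = 23, phaseinc = 0 and
 * auxdiv = 0 gives desired_divisor = (23 + 2) * 64 + 0 = 1600, so
 * lpt_iclkip_freq() again returns 108000 kHz.
 */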

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
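/*
 * Editor's note (mapping inferred from the code below, not from BSpec):
 * with_spread=false selects the "without spread" variant (SSCCTL_PATHALT is
 * left set), with_spread=true && with_fdi=false enables CLKOUT_DP with
 * spread, and with_spread=true && with_fdi=true additionally programs the
 * FDI mPHY via lpt_fdi_program_mphy().
 */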
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi)
			lpt_fdi_program_mphy(dev_priv);
	}

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}

#define BEND_IDX(steps) ((50 + (steps)) / 5)

static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
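
/*
 * Editor's note (illustrative, derived from BEND_IDX and the table above):
 * the table index is BEND_IDX(steps) = (50 + steps) / 5, so index 0
 * corresponds to -50 steps, index 10 to 0 steps (no bend) and index 20 to
 * +50 steps. Per the comment below, steps = +50 would shorten the clock
 * period by about (50 / 10) * 5.787 ps = 28.9 ps.
 */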

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
		return;

	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX

static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);

	if ((ctl & SPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	if (IS_BROADWELL(dev_priv) &&
	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
		return true;

	return false;
}

static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
			       enum intel_dpll_id id)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));

	if ((ctl & WRPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
		return true;

	if ((IS_BROADWELL(dev_priv) || IS_HASWELL_ULT(dev_priv)) &&
	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	return false;
}

static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	dev_priv->display.dpll.pch_ssc_use = 0;

	if (spll_uses_pch_ssc(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
		dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
		dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
		dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	if (dev_priv->display.dpll.pch_ssc_use)
		return;

	if (has_fdi) {
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}

static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_encoder *encoder;
	struct intel_shared_dpll *pll;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->display.vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for_each_shared_dpll(dev_priv, pll, i) {
		u32 temp;

		temp = intel_de_read(dev_priv, PCH_DPLL(pll->info->id));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to set up the display reference clock before
	 * enabling the DPLL. This is only under the driver's control
	 * after PCH B stepping; previous chipset steppings should
	 * ignore this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(display) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(display) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}
	} else if (using_ssc_source) {
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(display) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else {
			val &= ~DREF_SSC1_ENABLE;
		}

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(display) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else {
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
			}
		} else {
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	drm_WARN_ON(&dev_priv->drm, val != final);
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ilk_init_pch_refclk(dev_priv);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
679