// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"

static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
{
	intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);

	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw */
static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	lpt_fdi_reset_mphy(dev_priv);

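	/*
	 * The raw offsets below are SBI mPHY registers programmed per the
	 * WaMPhyProgramming:hsw tables; each 0x20xx/0x21xx pair writes the
	 * same value at +0x100, presumably one per FDI mPHY channel.
	 */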
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

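/* Stop the iCLKIP output: gate the pixel clock and disable the modulator */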
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	intel_sbi_lock(dev_priv);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	intel_sbi_unlock(dev_priv);
}

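/*
 * Summary of the math used below: the resulting iCLKIP frequency is
 * iclk_virtual_root_freq / (desired_divisor << auxdiv), and the hardware
 * encodes desired_divisor as (divsel + 2) * iclk_pi_range + phaseinc.
 */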
struct iclkip_params {
	u32 iclk_virtual_root_freq;
	u32 iclk_pi_range;
	u32 divsel, phaseinc, auxdiv, phasedir, desired_divisor;
};

static void iclkip_params_init(struct iclkip_params *p)
{
	memset(p, 0, sizeof(*p));

	p->iclk_virtual_root_freq = 172800 * 1000;
	p->iclk_pi_range = 64;
}

static int lpt_iclkip_freq(struct iclkip_params *p)
{
	return DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
				 p->desired_divisor << p->auxdiv);
}

static void lpt_compute_iclkip(struct iclkip_params *p, int clock)
{
	iclkip_params_init(p);

	/* The iCLK virtual clock root frequency is in MHz, but the
	 * adjusted_mode->crtc_clock is in kHz. To compute the divisors we
	 * have to divide one by the other, so the virtual clock is kept
	 * in kHz here for higher precision.
	 */
	for (p->auxdiv = 0; p->auxdiv < 2; p->auxdiv++) {
		p->desired_divisor = DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
						       clock << p->auxdiv);
		p->divsel = (p->desired_divisor / p->iclk_pi_range) - 2;
		p->phaseinc = p->desired_divisor % p->iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (p->divsel <= 0x7f)
			break;
	}
}

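/*
 * Worked example (illustrative): for a 108000 kHz pixel clock and auxdiv=0,
 * desired_divisor = 172800000 / 108000 = 1600, so divsel = 1600 / 64 - 2 = 23
 * and phaseinc = 1600 % 64 = 0; lpt_iclkip_freq() then maps these back to
 * exactly 108000 kHz.
 */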
int lpt_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct iclkip_params p;

	lpt_compute_iclkip(&p, crtc_state->hw.adjusted_mode.crtc_clock);

	return lpt_iclkip_freq(&p);
}

/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	struct iclkip_params p;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	lpt_compute_iclkip(&p, clock);
	drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock);

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);

	intel_sbi_lock(dev_priv);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(p.divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(p.phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(p.phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(p.auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	intel_sbi_unlock(dev_priv);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	struct iclkip_params p;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	iclkip_params_init(&p);

	intel_sbi_lock(dev_priv);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		intel_sbi_unlock(dev_priv);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	p.divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	p.phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	intel_sbi_unlock(dev_priv);

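	/* Invert lpt_compute_iclkip(): rebuild the divisor from divsel/phaseinc */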
	p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;

	return lpt_iclkip_freq(&p);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	intel_sbi_lock(dev_priv);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi)
			lpt_fdi_program_mphy(dev_priv);
	}

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	intel_sbi_unlock(dev_priv);
}

/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	intel_sbi_lock(dev_priv);

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	intel_sbi_unlock(dev_priv);
}

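/* Maps a bend amount of -50..+50 (in steps of 5) to a table index of 0..20 */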
#define BEND_IDX(steps) ((50 + (steps)) / 5)

static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
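 * e.g. steps = -20 maps to BEND_IDX(-20) = 6 and sscdivintphase[6] = 0x0225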
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
		return;

	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	intel_sbi_lock(dev_priv);

	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	intel_sbi_unlock(dev_priv);
}

#undef BEND_IDX

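/*
 * The helpers below check whether an already enabled PLL (e.g. one left
 * running by the BIOS) is currently fed by the PCH SSC reference, so that
 * lpt_init_pch_refclk() knows it must not disable that reference.
 */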
static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);

	if ((ctl & SPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	if (IS_BROADWELL(dev_priv) &&
	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
		return true;

	return false;
}

static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
			       enum intel_dpll_id id)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));

	if ((ctl & WRPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
		return true;

	if ((IS_BROADWELL(dev_priv) || IS_HASWELL_ULT(dev_priv)) &&
	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	return false;
}

static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

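	/* Only the analog (VGA) output goes over FDI here, so it alone sets has_fdi */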
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	dev_priv->display.dpll.pch_ssc_use = 0;

	if (spll_uses_pch_ssc(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
		dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
		dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
		dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	if (dev_priv->display.dpll.pch_ssc_use)
		return;

	if (has_fdi) {
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}

static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_encoder *encoder;
	struct intel_shared_dpll *pll;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->display.vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for_each_shared_dpll(dev_priv, pll, i) {
		u32 temp;

		temp = intel_de_read(dev_priv, PCH_DPLL(pll->info->id));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to set up the display reference clock before DPLL
	 * enabling. This is only under driver control after the PCH B
	 * stepping; earlier chipset steppings ignore this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(display) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(display) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}
	} else if (using_ssc_source) {
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(display) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else {
			val &= ~DREF_SSC1_ENABLE;
		}

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(display) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else {
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
			}
		} else {
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	drm_WARN_ON(&dev_priv->drm, val != final);
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ilk_init_pch_refclk(dev_priv);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}