1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2020 Intel Corporation
4 */
5
6 #include <linux/kernel.h>
7 #include <linux/string_helpers.h>
8
9 #include "i915_drv.h"
10 #include "i915_reg.h"
11 #include "intel_atomic.h"
12 #include "intel_crtc.h"
13 #include "intel_cx0_phy.h"
14 #include "intel_de.h"
15 #include "intel_display.h"
16 #include "intel_display_types.h"
17 #include "intel_dpio_phy.h"
18 #include "intel_dpll.h"
19 #include "intel_lvds.h"
20 #include "intel_lvds_regs.h"
21 #include "intel_panel.h"
22 #include "intel_pps.h"
23 #include "intel_snps_phy.h"
24 #include "vlv_dpio_phy_regs.h"
25 #include "vlv_sideband.h"
26
27 struct intel_dpll_funcs {
28 int (*crtc_compute_clock)(struct intel_atomic_state *state,
29 struct intel_crtc *crtc);
30 int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
31 struct intel_crtc *crtc);
32 };
33
34 struct intel_limit {
35 struct {
36 int min, max;
37 } dot, vco, n, m, m1, m2, p, p1;
38
39 struct {
40 int dot_limit;
41 int p2_slow, p2_fast;
42 } p2;
43 };
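/*
 * Each limit table below gives the legal min/max range for every PLL
 * divisor on a given platform/output combination.  p2.dot_limit is the
 * pixel clock threshold used by i9xx_select_p2_div(): for non-LVDS
 * outputs, targets below it use p2_slow and targets at or above it use
 * p2_fast (LVDS instead picks p2 based on single vs. dual channel).
 */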
44 static const struct intel_limit intel_limits_i8xx_dac = {
45 .dot = { .min = 25000, .max = 350000 },
46 .vco = { .min = 908000, .max = 1512000 },
47 .n = { .min = 2, .max = 16 },
48 .m = { .min = 96, .max = 140 },
49 .m1 = { .min = 18, .max = 26 },
50 .m2 = { .min = 6, .max = 16 },
51 .p = { .min = 4, .max = 128 },
52 .p1 = { .min = 2, .max = 33 },
53 .p2 = { .dot_limit = 165000,
54 .p2_slow = 4, .p2_fast = 2 },
55 };
56
57 static const struct intel_limit intel_limits_i8xx_dvo = {
58 .dot = { .min = 25000, .max = 350000 },
59 .vco = { .min = 908000, .max = 1512000 },
60 .n = { .min = 2, .max = 16 },
61 .m = { .min = 96, .max = 140 },
62 .m1 = { .min = 18, .max = 26 },
63 .m2 = { .min = 6, .max = 16 },
64 .p = { .min = 4, .max = 128 },
65 .p1 = { .min = 2, .max = 33 },
66 .p2 = { .dot_limit = 165000,
67 .p2_slow = 4, .p2_fast = 4 },
68 };
69
70 static const struct intel_limit intel_limits_i8xx_lvds = {
71 .dot = { .min = 25000, .max = 350000 },
72 .vco = { .min = 908000, .max = 1512000 },
73 .n = { .min = 2, .max = 16 },
74 .m = { .min = 96, .max = 140 },
75 .m1 = { .min = 18, .max = 26 },
76 .m2 = { .min = 6, .max = 16 },
77 .p = { .min = 4, .max = 128 },
78 .p1 = { .min = 1, .max = 6 },
79 .p2 = { .dot_limit = 165000,
80 .p2_slow = 14, .p2_fast = 7 },
81 };
82
83 static const struct intel_limit intel_limits_i9xx_sdvo = {
84 .dot = { .min = 20000, .max = 400000 },
85 .vco = { .min = 1400000, .max = 2800000 },
86 .n = { .min = 1, .max = 6 },
87 .m = { .min = 70, .max = 120 },
88 .m1 = { .min = 8, .max = 18 },
89 .m2 = { .min = 3, .max = 7 },
90 .p = { .min = 5, .max = 80 },
91 .p1 = { .min = 1, .max = 8 },
92 .p2 = { .dot_limit = 200000,
93 .p2_slow = 10, .p2_fast = 5 },
94 };
95
96 static const struct intel_limit intel_limits_i9xx_lvds = {
97 .dot = { .min = 20000, .max = 400000 },
98 .vco = { .min = 1400000, .max = 2800000 },
99 .n = { .min = 1, .max = 6 },
100 .m = { .min = 70, .max = 120 },
101 .m1 = { .min = 8, .max = 18 },
102 .m2 = { .min = 3, .max = 7 },
103 .p = { .min = 7, .max = 98 },
104 .p1 = { .min = 1, .max = 8 },
105 .p2 = { .dot_limit = 112000,
106 .p2_slow = 14, .p2_fast = 7 },
107 };
108
109
110 static const struct intel_limit intel_limits_g4x_sdvo = {
111 .dot = { .min = 25000, .max = 270000 },
112 .vco = { .min = 1750000, .max = 3500000},
113 .n = { .min = 1, .max = 4 },
114 .m = { .min = 104, .max = 138 },
115 .m1 = { .min = 17, .max = 23 },
116 .m2 = { .min = 5, .max = 11 },
117 .p = { .min = 10, .max = 30 },
118 .p1 = { .min = 1, .max = 3},
119 .p2 = { .dot_limit = 270000,
120 .p2_slow = 10,
121 .p2_fast = 10
122 },
123 };
124
125 static const struct intel_limit intel_limits_g4x_hdmi = {
126 .dot = { .min = 22000, .max = 400000 },
127 .vco = { .min = 1750000, .max = 3500000},
128 .n = { .min = 1, .max = 4 },
129 .m = { .min = 104, .max = 138 },
130 .m1 = { .min = 16, .max = 23 },
131 .m2 = { .min = 5, .max = 11 },
132 .p = { .min = 5, .max = 80 },
133 .p1 = { .min = 1, .max = 8},
134 .p2 = { .dot_limit = 165000,
135 .p2_slow = 10, .p2_fast = 5 },
136 };
137
138 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
139 .dot = { .min = 20000, .max = 115000 },
140 .vco = { .min = 1750000, .max = 3500000 },
141 .n = { .min = 1, .max = 3 },
142 .m = { .min = 104, .max = 138 },
143 .m1 = { .min = 17, .max = 23 },
144 .m2 = { .min = 5, .max = 11 },
145 .p = { .min = 28, .max = 112 },
146 .p1 = { .min = 2, .max = 8 },
147 .p2 = { .dot_limit = 0,
148 .p2_slow = 14, .p2_fast = 14
149 },
150 };
151
152 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
153 .dot = { .min = 80000, .max = 224000 },
154 .vco = { .min = 1750000, .max = 3500000 },
155 .n = { .min = 1, .max = 3 },
156 .m = { .min = 104, .max = 138 },
157 .m1 = { .min = 17, .max = 23 },
158 .m2 = { .min = 5, .max = 11 },
159 .p = { .min = 14, .max = 42 },
160 .p1 = { .min = 2, .max = 6 },
161 .p2 = { .dot_limit = 0,
162 .p2_slow = 7, .p2_fast = 7
163 },
164 };
165
166 static const struct intel_limit pnv_limits_sdvo = {
167 .dot = { .min = 20000, .max = 400000},
168 .vco = { .min = 1700000, .max = 3500000 },
/* Pineview's N counter is a ring counter */
170 .n = { .min = 3, .max = 6 },
171 .m = { .min = 2, .max = 256 },
172 /* Pineview only has one combined m divider, which we treat as m2. */
173 .m1 = { .min = 0, .max = 0 },
174 .m2 = { .min = 0, .max = 254 },
175 .p = { .min = 5, .max = 80 },
176 .p1 = { .min = 1, .max = 8 },
177 .p2 = { .dot_limit = 200000,
178 .p2_slow = 10, .p2_fast = 5 },
179 };
180
181 static const struct intel_limit pnv_limits_lvds = {
182 .dot = { .min = 20000, .max = 400000 },
183 .vco = { .min = 1700000, .max = 3500000 },
184 .n = { .min = 3, .max = 6 },
185 .m = { .min = 2, .max = 256 },
186 .m1 = { .min = 0, .max = 0 },
187 .m2 = { .min = 0, .max = 254 },
188 .p = { .min = 7, .max = 112 },
189 .p1 = { .min = 1, .max = 8 },
190 .p2 = { .dot_limit = 112000,
191 .p2_slow = 14, .p2_fast = 14 },
192 };
193
194 /* Ironlake / Sandybridge
195 *
196 * We calculate clock using (register_value + 2) for N/M1/M2, so here
197 * the range value for them is (actual_value - 2).
198 */
199 static const struct intel_limit ilk_limits_dac = {
200 .dot = { .min = 25000, .max = 350000 },
201 .vco = { .min = 1760000, .max = 3510000 },
202 .n = { .min = 1, .max = 5 },
203 .m = { .min = 79, .max = 127 },
204 .m1 = { .min = 12, .max = 22 },
205 .m2 = { .min = 5, .max = 9 },
206 .p = { .min = 5, .max = 80 },
207 .p1 = { .min = 1, .max = 8 },
208 .p2 = { .dot_limit = 225000,
209 .p2_slow = 10, .p2_fast = 5 },
210 };
211
212 static const struct intel_limit ilk_limits_single_lvds = {
213 .dot = { .min = 25000, .max = 350000 },
214 .vco = { .min = 1760000, .max = 3510000 },
215 .n = { .min = 1, .max = 3 },
216 .m = { .min = 79, .max = 118 },
217 .m1 = { .min = 12, .max = 22 },
218 .m2 = { .min = 5, .max = 9 },
219 .p = { .min = 28, .max = 112 },
220 .p1 = { .min = 2, .max = 8 },
221 .p2 = { .dot_limit = 225000,
222 .p2_slow = 14, .p2_fast = 14 },
223 };
224
225 static const struct intel_limit ilk_limits_dual_lvds = {
226 .dot = { .min = 25000, .max = 350000 },
227 .vco = { .min = 1760000, .max = 3510000 },
228 .n = { .min = 1, .max = 3 },
229 .m = { .min = 79, .max = 127 },
230 .m1 = { .min = 12, .max = 22 },
231 .m2 = { .min = 5, .max = 9 },
232 .p = { .min = 14, .max = 56 },
233 .p1 = { .min = 2, .max = 8 },
234 .p2 = { .dot_limit = 225000,
235 .p2_slow = 7, .p2_fast = 7 },
236 };
237
/* LVDS 100 MHz refclk limits. */
239 static const struct intel_limit ilk_limits_single_lvds_100m = {
240 .dot = { .min = 25000, .max = 350000 },
241 .vco = { .min = 1760000, .max = 3510000 },
242 .n = { .min = 1, .max = 2 },
243 .m = { .min = 79, .max = 126 },
244 .m1 = { .min = 12, .max = 22 },
245 .m2 = { .min = 5, .max = 9 },
246 .p = { .min = 28, .max = 112 },
247 .p1 = { .min = 2, .max = 8 },
248 .p2 = { .dot_limit = 225000,
249 .p2_slow = 14, .p2_fast = 14 },
250 };
251
252 static const struct intel_limit ilk_limits_dual_lvds_100m = {
253 .dot = { .min = 25000, .max = 350000 },
254 .vco = { .min = 1760000, .max = 3510000 },
255 .n = { .min = 1, .max = 3 },
256 .m = { .min = 79, .max = 126 },
257 .m1 = { .min = 12, .max = 22 },
258 .m2 = { .min = 5, .max = 9 },
259 .p = { .min = 14, .max = 42 },
260 .p1 = { .min = 2, .max = 6 },
261 .p2 = { .dot_limit = 225000,
262 .p2_slow = 7, .p2_fast = 7 },
263 };
264
265 static const struct intel_limit intel_limits_vlv = {
266 /*
267 * These are based on the data rate limits (measured in fast clocks)
268 * since those are the strictest limits we have. The fast
269 * clock and actual rate limits are more relaxed, so checking
270 * them would make no difference.
271 */
272 .dot = { .min = 25000, .max = 270000 },
273 .vco = { .min = 4000000, .max = 6000000 },
274 .n = { .min = 1, .max = 7 },
275 .m1 = { .min = 2, .max = 3 },
276 .m2 = { .min = 11, .max = 156 },
277 .p1 = { .min = 2, .max = 3 },
278 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
279 };
280
281 static const struct intel_limit intel_limits_chv = {
282 /*
283 * These are based on the data rate limits (measured in fast clocks)
284 * since those are the strictest limits we have. The fast
285 * clock and actual rate limits are more relaxed, so checking
286 * them would make no difference.
287 */
288 .dot = { .min = 25000, .max = 540000 },
289 .vco = { .min = 4800000, .max = 6480000 },
290 .n = { .min = 1, .max = 1 },
291 .m1 = { .min = 2, .max = 2 },
292 .m2 = { .min = 24 << 22, .max = 175 << 22 },
293 .p1 = { .min = 2, .max = 4 },
294 .p2 = { .p2_slow = 1, .p2_fast = 14 },
295 };
296
297 static const struct intel_limit intel_limits_bxt = {
298 .dot = { .min = 25000, .max = 594000 },
299 .vco = { .min = 4800000, .max = 6700000 },
300 .n = { .min = 1, .max = 1 },
301 .m1 = { .min = 2, .max = 2 },
302 /* FIXME: find real m2 limits */
303 .m2 = { .min = 2 << 22, .max = 255 << 22 },
304 .p1 = { .min = 2, .max = 4 },
305 .p2 = { .p2_slow = 1, .p2_fast = 20 },
306 };
307
308 /*
309 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
310 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
311 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
312 * The helpers' return value is the rate of the clock that is fed to the
313 * display engine's pipe which can be the above fast dot clock rate or a
314 * divided-down version of it.
315 */
316 /* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
318 {
319 clock->m = clock->m2 + 2;
320 clock->p = clock->p1 * clock->p2;
321
322 clock->vco = clock->n == 0 ? 0 :
323 DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
324 clock->dot = clock->p == 0 ? 0 :
325 DIV_ROUND_CLOSEST(clock->vco, clock->p);
326
327 return clock->dot;
328 }
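/*
 * Worked example for the Pineview formula above (illustrative values
 * only, not taken from any real mode): refclk = 96000 kHz, m2 = 98
 * (so m = 100), n = 3, p1 = 8 and p2 = 5 (so p = 40) gives
 * vco = 96000 * 100 / 3 = 3200000 kHz and dot = 3200000 / 40 = 80000 kHz.
 */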
329
static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
331 {
332 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
333 }
334
int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
336 {
337 clock->m = i9xx_dpll_compute_m(clock);
338 clock->p = clock->p1 * clock->p2;
339
340 clock->vco = clock->n + 2 == 0 ? 0 :
341 DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
342 clock->dot = clock->p == 0 ? 0 :
343 DIV_ROUND_CLOSEST(clock->vco, clock->p);
344
345 return clock->dot;
346 }
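/*
 * Worked example for the i9xx formula above (illustrative values only):
 * refclk = 96000 kHz, m1 = 14, m2 = 3 gives m = 5 * 16 + 5 = 85; with
 * n = 1 that is vco = 96000 * 85 / 3 = 2720000 kHz, and p1 = 1, p2 = 10
 * (p = 10) yields dot = 272000 kHz.
 */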
347
static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
349 {
350 clock->m = clock->m1 * clock->m2;
351 clock->p = clock->p1 * clock->p2 * 5;
352
353 clock->vco = clock->n == 0 ? 0 :
354 DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
355 clock->dot = clock->p == 0 ? 0 :
356 DIV_ROUND_CLOSEST(clock->vco, clock->p);
357
358 return clock->dot;
359 }
360
int chv_calc_dpll_params(int refclk, struct dpll *clock)
362 {
363 clock->m = clock->m1 * clock->m2;
364 clock->p = clock->p1 * clock->p2 * 5;
365
366 clock->vco = clock->n == 0 ? 0 :
367 DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), clock->n << 22);
368 clock->dot = clock->p == 0 ? 0 :
369 DIV_ROUND_CLOSEST(clock->vco, clock->p);
370
371 return clock->dot;
372 }
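/*
 * Note: on CHV, m2 is a fixed point value with 22 fractional bits (see
 * the << 22 shifts in intel_limits_chv and chv_find_best_dpll), which
 * is why the vco calculation above divides by n << 22.
 */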
373
static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
375 {
376 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
377 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
378
379 if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
380 return i915->display.vbt.lvds_ssc_freq;
381 else if (HAS_PCH_SPLIT(i915))
382 return 120000;
383 else if (DISPLAY_VER(i915) != 2)
384 return 96000;
385 else
386 return 48000;
387 }
388
void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
390 struct intel_dpll_hw_state *dpll_hw_state)
391 {
392 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
393 struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
394
395 if (DISPLAY_VER(dev_priv) >= 4) {
396 u32 tmp;
397
398 /* No way to read it out on pipes B and C */
399 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
400 tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
401 else
402 tmp = intel_de_read(dev_priv,
403 DPLL_MD(dev_priv, crtc->pipe));
404
405 hw_state->dpll_md = tmp;
406 }
407
408 hw_state->dpll = intel_de_read(dev_priv, DPLL(dev_priv, crtc->pipe));
409
410 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
411 hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe));
412 hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe));
413 } else {
414 /* Mask out read-only status bits. */
415 hw_state->dpll &= ~(DPLL_LOCK_VLV |
416 DPLL_PORTC_READY_MASK |
417 DPLL_PORTB_READY_MASK);
418 }
419 }
420
421 /* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
423 {
424 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
425 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
426 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
427 u32 dpll = hw_state->dpll;
428 u32 fp;
429 struct dpll clock;
430 int port_clock;
431 int refclk = i9xx_pll_refclk(crtc_state);
432
433 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
434 fp = hw_state->fp0;
435 else
436 fp = hw_state->fp1;
437
438 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
439 if (IS_PINEVIEW(dev_priv)) {
440 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
441 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
442 } else {
443 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
444 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
445 }
446
447 if (DISPLAY_VER(dev_priv) != 2) {
448 if (IS_PINEVIEW(dev_priv))
449 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
450 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
451 else
452 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
453 DPLL_FPA01_P1_POST_DIV_SHIFT);
454
455 switch (dpll & DPLL_MODE_MASK) {
456 case DPLLB_MODE_DAC_SERIAL:
457 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
458 5 : 10;
459 break;
460 case DPLLB_MODE_LVDS:
461 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
462 7 : 14;
463 break;
464 default:
465 drm_dbg_kms(&dev_priv->drm,
466 "Unknown DPLL mode %08x in programmed "
467 "mode\n", (int)(dpll & DPLL_MODE_MASK));
468 return;
469 }
470
471 if (IS_PINEVIEW(dev_priv))
472 port_clock = pnv_calc_dpll_params(refclk, &clock);
473 else
474 port_clock = i9xx_calc_dpll_params(refclk, &clock);
475 } else {
476 enum pipe lvds_pipe;
477
478 if (IS_I85X(dev_priv) &&
479 intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
480 lvds_pipe == crtc->pipe) {
481 u32 lvds = intel_de_read(dev_priv, LVDS);
482
483 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
484 DPLL_FPA01_P1_POST_DIV_SHIFT);
485
486 if (lvds & LVDS_CLKB_POWER_UP)
487 clock.p2 = 7;
488 else
489 clock.p2 = 14;
490 } else {
491 if (dpll & PLL_P1_DIVIDE_BY_TWO)
492 clock.p1 = 2;
493 else {
494 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
495 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
496 }
497 if (dpll & PLL_P2_DIVIDE_BY_4)
498 clock.p2 = 4;
499 else
500 clock.p2 = 2;
501 }
502
503 port_clock = i9xx_calc_dpll_params(refclk, &clock);
504 }
505
506 /*
507 * This value includes pixel_multiplier. We will use
508 * port_clock to compute adjusted_mode.crtc_clock in the
509 * encoder's get_config() function.
510 */
511 crtc_state->port_clock = port_clock;
512 }
513
void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
515 {
516 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
517 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
518 enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
519 enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
520 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
521 int refclk = 100000;
522 struct dpll clock;
523 u32 tmp;
524
525 /* In case of DSI, DPLL will not be used */
526 if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
527 return;
528
529 vlv_dpio_get(dev_priv);
530 tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(ch));
531 vlv_dpio_put(dev_priv);
532
533 clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
534 clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
535 clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp);
536 clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp);
537 clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp);
538
539 crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock);
540 }
541
void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
543 {
544 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
545 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
546 enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
547 enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
548 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
549 struct dpll clock;
550 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
551 int refclk = 100000;
552
553 /* In case of DSI, DPLL will not be used */
554 if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
555 return;
556
557 vlv_dpio_get(dev_priv);
558 cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(ch));
559 pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(ch));
560 pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(ch));
561 pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(ch));
562 pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
563 vlv_dpio_put(dev_priv);
564
565 clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
566 clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
567 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
568 clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2);
569 clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1);
570 clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13);
571 clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13);
572
573 crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock);
574 }
575
576 /*
577 * Returns whether the given set of divisors are valid for a given refclk with
578 * the given connectors.
579 */
static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
581 const struct intel_limit *limit,
582 const struct dpll *clock)
583 {
584 if (clock->n < limit->n.min || limit->n.max < clock->n)
585 return false;
586 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
587 return false;
588 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
589 return false;
590 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
591 return false;
592
593 if (!IS_PINEVIEW(dev_priv) &&
594 !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
595 !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv))
596 if (clock->m1 <= clock->m2)
597 return false;
598
599 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
600 !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
601 if (clock->p < limit->p.min || limit->p.max < clock->p)
602 return false;
603 if (clock->m < limit->m.min || limit->m.max < clock->m)
604 return false;
605 }
606
607 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
608 return false;
609 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
610 * connector, etc., rather than just a single range.
611 */
612 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
613 return false;
614
615 return true;
616 }
617
618 static int
i9xx_select_p2_div(const struct intel_limit *limit,
620 const struct intel_crtc_state *crtc_state,
621 int target)
622 {
623 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
624
625 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
626 /*
627 * For LVDS just rely on its current settings for dual-channel.
628 * We haven't figured out how to reliably set up different
629 * single/dual channel state, if we even can.
630 */
631 if (intel_is_dual_link_lvds(dev_priv))
632 return limit->p2.p2_fast;
633 else
634 return limit->p2.p2_slow;
635 } else {
636 if (target < limit->p2.dot_limit)
637 return limit->p2.p2_slow;
638 else
639 return limit->p2.p2_fast;
640 }
641 }
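/*
 * For example, with intel_limits_i9xx_sdvo (dot_limit = 200000) a
 * 148500 kHz target would select p2_slow = 10, while a 270000 kHz
 * target would select p2_fast = 5 (illustrative targets only).
 */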
642
643 /*
644 * Returns a set of divisors for the desired target clock with the given
645 * refclk, or FALSE.
646 *
647 * Target and reference clocks are specified in kHz.
648 *
649 * If match_clock is provided, then best_clock P divider must match the P
650 * divider from @match_clock used for LVDS downclocking.
651 */
652 static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
654 struct intel_crtc_state *crtc_state,
655 int target, int refclk,
656 const struct dpll *match_clock,
657 struct dpll *best_clock)
658 {
659 struct drm_device *dev = crtc_state->uapi.crtc->dev;
660 struct dpll clock;
661 int err = target;
662
663 memset(best_clock, 0, sizeof(*best_clock));
664
665 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
666
667 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
668 clock.m1++) {
669 for (clock.m2 = limit->m2.min;
670 clock.m2 <= limit->m2.max; clock.m2++) {
671 if (clock.m2 >= clock.m1)
672 break;
673 for (clock.n = limit->n.min;
674 clock.n <= limit->n.max; clock.n++) {
675 for (clock.p1 = limit->p1.min;
676 clock.p1 <= limit->p1.max; clock.p1++) {
677 int this_err;
678
679 i9xx_calc_dpll_params(refclk, &clock);
680 if (!intel_pll_is_valid(to_i915(dev),
681 limit,
682 &clock))
683 continue;
684 if (match_clock &&
685 clock.p != match_clock->p)
686 continue;
687
688 this_err = abs(clock.dot - target);
689 if (this_err < err) {
690 *best_clock = clock;
691 err = this_err;
692 }
693 }
694 }
695 }
696 }
697
698 return (err != target);
699 }
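/*
 * The loop above is a brute force scan of all divisor combinations,
 * keeping the candidate whose computed dot clock is closest to the
 * target; a false return means no candidate improved on the initial
 * error (seeded with the target itself), which in practice means no
 * valid divisors were found.
 */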
700
701 /*
702 * Returns a set of divisors for the desired target clock with the given
703 * refclk, or FALSE.
704 *
705 * Target and reference clocks are specified in kHz.
706 *
707 * If match_clock is provided, then best_clock P divider must match the P
708 * divider from @match_clock used for LVDS downclocking.
709 */
710 static bool
pnv_find_best_dpll(const struct intel_limit *limit,
712 struct intel_crtc_state *crtc_state,
713 int target, int refclk,
714 const struct dpll *match_clock,
715 struct dpll *best_clock)
716 {
717 struct drm_device *dev = crtc_state->uapi.crtc->dev;
718 struct dpll clock;
719 int err = target;
720
721 memset(best_clock, 0, sizeof(*best_clock));
722
723 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
724
725 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
726 clock.m1++) {
727 for (clock.m2 = limit->m2.min;
728 clock.m2 <= limit->m2.max; clock.m2++) {
729 for (clock.n = limit->n.min;
730 clock.n <= limit->n.max; clock.n++) {
731 for (clock.p1 = limit->p1.min;
732 clock.p1 <= limit->p1.max; clock.p1++) {
733 int this_err;
734
735 pnv_calc_dpll_params(refclk, &clock);
736 if (!intel_pll_is_valid(to_i915(dev),
737 limit,
738 &clock))
739 continue;
740 if (match_clock &&
741 clock.p != match_clock->p)
742 continue;
743
744 this_err = abs(clock.dot - target);
745 if (this_err < err) {
746 *best_clock = clock;
747 err = this_err;
748 }
749 }
750 }
751 }
752 }
753
754 return (err != target);
755 }
756
757 /*
758 * Returns a set of divisors for the desired target clock with the given
759 * refclk, or FALSE.
760 *
761 * Target and reference clocks are specified in kHz.
762 *
763 * If match_clock is provided, then best_clock P divider must match the P
764 * divider from @match_clock used for LVDS downclocking.
765 */
766 static bool
g4x_find_best_dpll(const struct intel_limit *limit,
768 struct intel_crtc_state *crtc_state,
769 int target, int refclk,
770 const struct dpll *match_clock,
771 struct dpll *best_clock)
772 {
773 struct drm_device *dev = crtc_state->uapi.crtc->dev;
774 struct dpll clock;
775 int max_n;
776 bool found = false;
777 /* approximately equals target * 0.00585 */
778 int err_most = (target >> 8) + (target >> 9);
779
780 memset(best_clock, 0, sizeof(*best_clock));
781
782 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
783
784 max_n = limit->n.max;
/* based on hardware requirements, prefer a smaller n for better precision */
786 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
787 /* based on hardware requirement, prefer larger m1,m2 */
788 for (clock.m1 = limit->m1.max;
789 clock.m1 >= limit->m1.min; clock.m1--) {
790 for (clock.m2 = limit->m2.max;
791 clock.m2 >= limit->m2.min; clock.m2--) {
792 for (clock.p1 = limit->p1.max;
793 clock.p1 >= limit->p1.min; clock.p1--) {
794 int this_err;
795
796 i9xx_calc_dpll_params(refclk, &clock);
797 if (!intel_pll_is_valid(to_i915(dev),
798 limit,
799 &clock))
800 continue;
801
802 this_err = abs(clock.dot - target);
803 if (this_err < err_most) {
804 *best_clock = clock;
805 err_most = this_err;
806 max_n = clock.n;
807 found = true;
808 }
809 }
810 }
811 }
812 }
813 return found;
814 }
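/*
 * Note: err_most above is target * (1/256 + 1/512) = target * 3/512,
 * i.e. a tolerance of roughly 0.59%; candidates further from the
 * target than that are rejected, and max_n is clamped to the n of the
 * best candidate found so that larger n values are not tried.
 */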
815
816 /*
817 * Check if the calculated PLL configuration is more optimal compared to the
818 * best configuration and error found so far. Return the calculated error.
819 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
821 const struct dpll *calculated_clock,
822 const struct dpll *best_clock,
823 unsigned int best_error_ppm,
824 unsigned int *error_ppm)
825 {
826 /*
827 * For CHV ignore the error and consider only the P value.
828 * Prefer a bigger P value based on HW requirements.
829 */
830 if (IS_CHERRYVIEW(to_i915(dev))) {
831 *error_ppm = 0;
832
833 return calculated_clock->p > best_clock->p;
834 }
835
836 if (drm_WARN_ON_ONCE(dev, !target_freq))
837 return false;
838
839 *error_ppm = div_u64(1000000ULL *
840 abs(target_freq - calculated_clock->dot),
841 target_freq);
842 /*
843 * Prefer a better P value over a better (smaller) error if the error
844 * is small. Ensure this preference for future configurations too by
845 * setting the error to 0.
846 */
847 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
848 *error_ppm = 0;
849
850 return true;
851 }
852
853 return *error_ppm + 10 < best_error_ppm;
854 }
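/*
 * Example of the ppm computation above (illustrative numbers): with
 * target_freq = 270000 kHz and a calculated dot clock of 270016 kHz
 * the error is 1000000 * 16 / 270000 = 59 ppm (truncated), which is
 * under the 100 ppm threshold where a bigger P is preferred over a
 * smaller error.
 */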
855
856 /*
857 * Returns a set of divisors for the desired target clock with the given
858 * refclk, or FALSE.
859 */
860 static bool
vlv_find_best_dpll(const struct intel_limit *limit,
862 struct intel_crtc_state *crtc_state,
863 int target, int refclk,
864 const struct dpll *match_clock,
865 struct dpll *best_clock)
866 {
867 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
868 struct drm_device *dev = crtc->base.dev;
869 struct dpll clock;
870 unsigned int bestppm = 1000000;
871 /* min update 19.2 MHz */
872 int max_n = min(limit->n.max, refclk / 19200);
873 bool found = false;
874
875 memset(best_clock, 0, sizeof(*best_clock));
876
/* based on hardware requirements, prefer a smaller n for better precision */
878 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
879 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
880 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
881 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
882 clock.p = clock.p1 * clock.p2 * 5;
883 /* based on hardware requirement, prefer bigger m1,m2 values */
884 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
885 unsigned int ppm;
886
887 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
888 refclk * clock.m1);
889
890 vlv_calc_dpll_params(refclk, &clock);
891
892 if (!intel_pll_is_valid(to_i915(dev),
893 limit,
894 &clock))
895 continue;
896
897 if (!vlv_PLL_is_optimal(dev, target,
898 &clock,
899 best_clock,
900 bestppm, &ppm))
901 continue;
902
903 *best_clock = clock;
904 bestppm = ppm;
905 found = true;
906 }
907 }
908 }
909 }
910
911 return found;
912 }
913
914 /*
915 * Returns a set of divisors for the desired target clock with the given
916 * refclk, or FALSE.
917 */
918 static bool
chv_find_best_dpll(const struct intel_limit *limit,
920 struct intel_crtc_state *crtc_state,
921 int target, int refclk,
922 const struct dpll *match_clock,
923 struct dpll *best_clock)
924 {
925 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
926 struct drm_device *dev = crtc->base.dev;
927 unsigned int best_error_ppm;
928 struct dpll clock;
929 u64 m2;
930 int found = false;
931
932 memset(best_clock, 0, sizeof(*best_clock));
933 best_error_ppm = 1000000;
934
935 /*
* Based on the hardware doc, n is always set to 1 and m1 is always
* set to 2. If we ever need to support a 200 MHz refclk, this needs
* to be revisited because n may no longer be 1.
939 */
940 clock.n = 1;
941 clock.m1 = 2;
942
943 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
944 for (clock.p2 = limit->p2.p2_fast;
945 clock.p2 >= limit->p2.p2_slow;
946 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
947 unsigned int error_ppm;
948
949 clock.p = clock.p1 * clock.p2 * 5;
950
951 m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
952 refclk * clock.m1);
953
954 if (m2 > INT_MAX/clock.m1)
955 continue;
956
957 clock.m2 = m2;
958
959 chv_calc_dpll_params(refclk, &clock);
960
961 if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
962 continue;
963
964 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
965 best_error_ppm, &error_ppm))
966 continue;
967
968 *best_clock = clock;
969 best_error_ppm = error_ppm;
970 found = true;
971 }
972 }
973
974 return found;
975 }
976
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
978 struct dpll *best_clock)
979 {
980 const struct intel_limit *limit = &intel_limits_bxt;
981 int refclk = 100000;
982
983 return chv_find_best_dpll(limit, crtc_state,
984 crtc_state->port_clock, refclk,
985 NULL, best_clock);
986 }
987
u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
989 {
990 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
991 }
992
static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
994 {
995 return (1 << dpll->n) << 16 | dpll->m2;
996 }
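/*
 * Note: on Pineview the FP register encodes N as a one-hot bit
 * (1 << n) rather than a plain value, which is why the readout in
 * i9xx_crtc_clock_get() decodes it with ffs() - 1.
 */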
997
static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state)
999 {
1000 return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
1001 }
1002
static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
1004 const struct dpll *clock,
1005 const struct dpll *reduced_clock)
1006 {
1007 struct intel_display *display = to_intel_display(crtc_state);
1008 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1009 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1010 u32 dpll;
1011
1012 dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
1013
1014 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1015 dpll |= DPLLB_MODE_LVDS;
1016 else
1017 dpll |= DPLLB_MODE_DAC_SERIAL;
1018
1019 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
1020 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
1021 dpll |= (crtc_state->pixel_multiplier - 1)
1022 << SDVO_MULTIPLIER_SHIFT_HIRES;
1023 }
1024
1025 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1026 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1027 dpll |= DPLL_SDVO_HIGH_SPEED;
1028
1029 if (intel_crtc_has_dp_encoder(crtc_state))
1030 dpll |= DPLL_SDVO_HIGH_SPEED;
1031
1032 /* compute bitmask from p1 value */
1033 if (IS_G4X(dev_priv)) {
1034 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1035 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1036 } else if (IS_PINEVIEW(dev_priv)) {
1037 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
1038 WARN_ON(reduced_clock->p1 != clock->p1);
1039 } else {
1040 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1041 WARN_ON(reduced_clock->p1 != clock->p1);
1042 }
1043
1044 switch (clock->p2) {
1045 case 5:
1046 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1047 break;
1048 case 7:
1049 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1050 break;
1051 case 10:
1052 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1053 break;
1054 case 14:
1055 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1056 break;
1057 }
1058 WARN_ON(reduced_clock->p2 != clock->p2);
1059
1060 if (DISPLAY_VER(dev_priv) >= 4)
1061 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
1062
1063 if (crtc_state->sdvo_tv_clock)
1064 dpll |= PLL_REF_INPUT_TVCLKINBC;
1065 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1066 intel_panel_use_ssc(display))
1067 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1068 else
1069 dpll |= PLL_REF_INPUT_DREFCLK;
1070
1071 return dpll;
1072 }
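/*
 * Note on the p1 encoding above: the hardware wants a one-hot bitmask,
 * so e.g. p1 = 3 is programmed as (1 << 2) within the P1 post divider
 * field; the readout path in i9xx_crtc_clock_get() reverses this with
 * ffs().
 */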
1073
static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
1075 const struct dpll *clock,
1076 const struct dpll *reduced_clock)
1077 {
1078 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1079 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1080 struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1081
1082 if (IS_PINEVIEW(dev_priv)) {
1083 hw_state->fp0 = pnv_dpll_compute_fp(clock);
1084 hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
1085 } else {
1086 hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1087 hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1088 }
1089
1090 hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
1091
1092 if (DISPLAY_VER(dev_priv) >= 4)
1093 hw_state->dpll_md = i965_dpll_md(crtc_state);
1094 }
1095
static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
1097 const struct dpll *clock,
1098 const struct dpll *reduced_clock)
1099 {
1100 struct intel_display *display = to_intel_display(crtc_state);
1101 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1102 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1103 u32 dpll;
1104
1105 dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
1106
1107 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1108 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1109 } else {
1110 if (clock->p1 == 2)
1111 dpll |= PLL_P1_DIVIDE_BY_TWO;
1112 else
1113 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1114 if (clock->p2 == 4)
1115 dpll |= PLL_P2_DIVIDE_BY_4;
1116 }
1117 WARN_ON(reduced_clock->p1 != clock->p1);
1118 WARN_ON(reduced_clock->p2 != clock->p2);
1119
1120 /*
1121 * Bspec:
1122 * "[Almador Errata}: For the correct operation of the muxed DVO pins
1123 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
1124 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
1125 * Enable) must be set to “1” in both the DPLL A Control Register
1126 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
1127 *
* For simplicity we simply keep both bits always enabled in
* both DPLLs. The spec says we should disable the DVO 2X clock
1130 * when not needed, but this seems to work fine in practice.
1131 */
1132 if (IS_I830(dev_priv) ||
1133 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
1134 dpll |= DPLL_DVO_2X_MODE;
1135
1136 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1137 intel_panel_use_ssc(display))
1138 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1139 else
1140 dpll |= PLL_REF_INPUT_DREFCLK;
1141
1142 return dpll;
1143 }
1144
static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
1146 const struct dpll *clock,
1147 const struct dpll *reduced_clock)
1148 {
1149 struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1150
1151 hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1152 hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1153
1154 hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock);
1155 }
1156
static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
1158 struct intel_crtc *crtc)
1159 {
1160 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1161 struct intel_crtc_state *crtc_state =
1162 intel_atomic_get_new_crtc_state(state, crtc);
1163 struct intel_encoder *encoder =
1164 intel_get_crtc_new_encoder(state, crtc_state);
1165 int ret;
1166
1167 if (DISPLAY_VER(dev_priv) < 11 &&
1168 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1169 return 0;
1170
1171 ret = intel_compute_shared_dplls(state, crtc, encoder);
1172 if (ret)
1173 return ret;
1174
1175 /* FIXME this is a mess */
1176 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1177 return 0;
1178
1179 /* CRT dotclock is determined via other means */
1180 if (!crtc_state->has_pch_encoder)
1181 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1182
1183 return 0;
1184 }
1185
static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
1187 struct intel_crtc *crtc)
1188 {
1189 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1190 struct intel_crtc_state *crtc_state =
1191 intel_atomic_get_new_crtc_state(state, crtc);
1192 struct intel_encoder *encoder =
1193 intel_get_crtc_new_encoder(state, crtc_state);
1194
1195 if (DISPLAY_VER(dev_priv) < 11 &&
1196 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1197 return 0;
1198
1199 return intel_reserve_shared_dplls(state, crtc, encoder);
1200 }
1201
static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
1203 struct intel_crtc *crtc)
1204 {
1205 struct intel_crtc_state *crtc_state =
1206 intel_atomic_get_new_crtc_state(state, crtc);
1207 struct intel_encoder *encoder =
1208 intel_get_crtc_new_encoder(state, crtc_state);
1209 int ret;
1210
1211 ret = intel_mpllb_calc_state(crtc_state, encoder);
1212 if (ret)
1213 return ret;
1214
1215 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1216
1217 return 0;
1218 }
1219
static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
1221 struct intel_crtc *crtc)
1222 {
1223 struct intel_crtc_state *crtc_state =
1224 intel_atomic_get_new_crtc_state(state, crtc);
1225 struct intel_encoder *encoder =
1226 intel_get_crtc_new_encoder(state, crtc_state);
1227 int ret;
1228
1229 ret = intel_cx0pll_calc_state(crtc_state, encoder);
1230 if (ret)
1231 return ret;
1232
1233 /* TODO: Do the readback via intel_compute_shared_dplls() */
1234 crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
1235
1236 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1237
1238 return 0;
1239 }
1240
static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
1242 {
1243 struct intel_display *display = to_intel_display(crtc_state);
1244 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1245 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1246
1247 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1248 ((intel_panel_use_ssc(display) && i915->display.vbt.lvds_ssc_freq == 100000) ||
1249 (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915))))
1250 return 25;
1251
1252 if (crtc_state->sdvo_tv_clock)
1253 return 20;
1254
1255 return 21;
1256 }
1257
static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
1259 {
1260 return dpll->m < factor * dpll->n;
1261 }
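/*
 * Illustration: with the default factor of 21 from ilk_fb_cb_factor()
 * and n = 2, FP_CB_TUNE ends up set whenever m < 42.
 */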
1262
static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor)
1264 {
1265 u32 fp;
1266
1267 fp = i9xx_dpll_compute_fp(clock);
1268 if (ilk_needs_fb_cb_tune(clock, factor))
1269 fp |= FP_CB_TUNE;
1270
1271 return fp;
1272 }
1273
static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
1275 const struct dpll *clock,
1276 const struct dpll *reduced_clock)
1277 {
1278 struct intel_display *display = to_intel_display(crtc_state);
1279 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1280 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1281 u32 dpll;
1282
1283 dpll = DPLL_VCO_ENABLE;
1284
1285 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1286 dpll |= DPLLB_MODE_LVDS;
1287 else
1288 dpll |= DPLLB_MODE_DAC_SERIAL;
1289
1290 dpll |= (crtc_state->pixel_multiplier - 1)
1291 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1292
1293 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1294 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1295 dpll |= DPLL_SDVO_HIGH_SPEED;
1296
1297 if (intel_crtc_has_dp_encoder(crtc_state))
1298 dpll |= DPLL_SDVO_HIGH_SPEED;
1299
1300 /*
1301 * The high speed IO clock is only really required for
1302 * SDVO/HDMI/DP, but we also enable it for CRT to make it
1303 * possible to share the DPLL between CRT and HDMI. Enabling
1304 * the clock needlessly does no real harm, except use up a
1305 * bit of power potentially.
1306 *
1307 * We'll limit this to IVB with 3 pipes, since it has only two
1308 * DPLLs and so DPLL sharing is the only way to get three pipes
1309 * driving PCH ports at the same time. On SNB we could do this,
1310 * and potentially avoid enabling the second DPLL, but it's not
* clear if it's a win or a loss power-wise. No point in doing
1312 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
1313 */
1314 if (INTEL_NUM_PIPES(dev_priv) == 3 &&
1315 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1316 dpll |= DPLL_SDVO_HIGH_SPEED;
1317
1318 /* compute bitmask from p1 value */
1319 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1320 /* also FPA1 */
1321 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1322
1323 switch (clock->p2) {
1324 case 5:
1325 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1326 break;
1327 case 7:
1328 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1329 break;
1330 case 10:
1331 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1332 break;
1333 case 14:
1334 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1335 break;
1336 }
1337 WARN_ON(reduced_clock->p2 != clock->p2);
1338
1339 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1340 intel_panel_use_ssc(display))
1341 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1342 else
1343 dpll |= PLL_REF_INPUT_DREFCLK;
1344
1345 return dpll;
1346 }
1347
static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
1349 const struct dpll *clock,
1350 const struct dpll *reduced_clock)
1351 {
1352 struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1353 int factor = ilk_fb_cb_factor(crtc_state);
1354
1355 hw_state->fp0 = ilk_dpll_compute_fp(clock, factor);
1356 hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor);
1357
1358 hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock);
1359 }
1360
static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
1362 struct intel_crtc *crtc)
1363 {
1364 struct intel_display *display = to_intel_display(state);
1365 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1366 struct intel_crtc_state *crtc_state =
1367 intel_atomic_get_new_crtc_state(state, crtc);
1368 const struct intel_limit *limit;
1369 int refclk = 120000;
1370 int ret;
1371
1372 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1373 if (!crtc_state->has_pch_encoder)
1374 return 0;
1375
1376 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1377 if (intel_panel_use_ssc(display)) {
1378 drm_dbg_kms(&dev_priv->drm,
1379 "using SSC reference clock of %d kHz\n",
1380 dev_priv->display.vbt.lvds_ssc_freq);
1381 refclk = dev_priv->display.vbt.lvds_ssc_freq;
1382 }
1383
1384 if (intel_is_dual_link_lvds(dev_priv)) {
1385 if (refclk == 100000)
1386 limit = &ilk_limits_dual_lvds_100m;
1387 else
1388 limit = &ilk_limits_dual_lvds;
1389 } else {
1390 if (refclk == 100000)
1391 limit = &ilk_limits_single_lvds_100m;
1392 else
1393 limit = &ilk_limits_single_lvds;
1394 }
1395 } else {
1396 limit = &ilk_limits_dac;
1397 }
1398
1399 if (!crtc_state->clock_set &&
1400 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1401 refclk, NULL, &crtc_state->dpll))
1402 return -EINVAL;
1403
1404 i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1405
1406 ilk_compute_dpll(crtc_state, &crtc_state->dpll,
1407 &crtc_state->dpll);
1408
1409 ret = intel_compute_shared_dplls(state, crtc, NULL);
1410 if (ret)
1411 return ret;
1412
1413 crtc_state->port_clock = crtc_state->dpll.dot;
1414 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1415
1416 return ret;
1417 }
1418
static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
1420 struct intel_crtc *crtc)
1421 {
1422 struct intel_crtc_state *crtc_state =
1423 intel_atomic_get_new_crtc_state(state, crtc);
1424
1425 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1426 if (!crtc_state->has_pch_encoder)
1427 return 0;
1428
1429 return intel_reserve_shared_dplls(state, crtc, NULL);
1430 }
1431
static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
1433 {
1434 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1435 u32 dpll;
1436
1437 dpll = DPLL_INTEGRATED_REF_CLK_VLV |
1438 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1439
1440 if (crtc->pipe != PIPE_A)
1441 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1442
1443 /* DPLL not used with DSI, but still need the rest set up */
1444 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1445 dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
1446
1447 return dpll;
1448 }
1449
void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
1451 {
1452 struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1453
1454 hw_state->dpll = vlv_dpll(crtc_state);
1455 hw_state->dpll_md = i965_dpll_md(crtc_state);
1456 }
1457
static u32 chv_dpll(const struct intel_crtc_state *crtc_state)
1459 {
1460 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1461 u32 dpll;
1462
1463 dpll = DPLL_SSC_REF_CLK_CHV |
1464 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1465
1466 if (crtc->pipe != PIPE_A)
1467 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1468
1469 /* DPLL not used with DSI, but still need the rest set up */
1470 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1471 dpll |= DPLL_VCO_ENABLE;
1472
1473 return dpll;
1474 }
1475
void chv_compute_dpll(struct intel_crtc_state *crtc_state)
1477 {
1478 struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1479
1480 hw_state->dpll = chv_dpll(crtc_state);
1481 hw_state->dpll_md = i965_dpll_md(crtc_state);
1482 }
1483
static int chv_crtc_compute_clock(struct intel_atomic_state *state,
1485 struct intel_crtc *crtc)
1486 {
1487 struct intel_crtc_state *crtc_state =
1488 intel_atomic_get_new_crtc_state(state, crtc);
1489 const struct intel_limit *limit = &intel_limits_chv;
1490 int refclk = 100000;
1491
1492 if (!crtc_state->clock_set &&
1493 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1494 refclk, NULL, &crtc_state->dpll))
1495 return -EINVAL;
1496
1497 chv_calc_dpll_params(refclk, &crtc_state->dpll);
1498
1499 chv_compute_dpll(crtc_state);
1500
1501 /* FIXME this is a mess */
1502 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1503 return 0;
1504
1505 crtc_state->port_clock = crtc_state->dpll.dot;
1506 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1507
1508 return 0;
1509 }
1510
static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
1512 struct intel_crtc *crtc)
1513 {
1514 struct intel_crtc_state *crtc_state =
1515 intel_atomic_get_new_crtc_state(state, crtc);
1516 const struct intel_limit *limit = &intel_limits_vlv;
1517 int refclk = 100000;
1518
1519 if (!crtc_state->clock_set &&
1520 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1521 refclk, NULL, &crtc_state->dpll))
1522 return -EINVAL;
1523
1524 vlv_calc_dpll_params(refclk, &crtc_state->dpll);
1525
1526 vlv_compute_dpll(crtc_state);
1527
1528 /* FIXME this is a mess */
1529 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1530 return 0;
1531
1532 crtc_state->port_clock = crtc_state->dpll.dot;
1533 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1534
1535 return 0;
1536 }
1537
static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
1539 struct intel_crtc *crtc)
1540 {
1541 struct intel_display *display = to_intel_display(state);
1542 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1543 struct intel_crtc_state *crtc_state =
1544 intel_atomic_get_new_crtc_state(state, crtc);
1545 const struct intel_limit *limit;
1546 int refclk = 96000;
1547
1548 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1549 if (intel_panel_use_ssc(display)) {
1550 refclk = dev_priv->display.vbt.lvds_ssc_freq;
1551 drm_dbg_kms(&dev_priv->drm,
1552 "using SSC reference clock of %d kHz\n",
1553 refclk);
1554 }
1555
1556 if (intel_is_dual_link_lvds(dev_priv))
1557 limit = &intel_limits_g4x_dual_channel_lvds;
1558 else
1559 limit = &intel_limits_g4x_single_channel_lvds;
1560 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
1561 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
1562 limit = &intel_limits_g4x_hdmi;
1563 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
1564 limit = &intel_limits_g4x_sdvo;
1565 } else {
1566 /* The option is for other outputs */
1567 limit = &intel_limits_i9xx_sdvo;
1568 }
1569
1570 if (!crtc_state->clock_set &&
1571 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1572 refclk, NULL, &crtc_state->dpll))
1573 return -EINVAL;
1574
1575 i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1576
1577 i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1578 &crtc_state->dpll);
1579
1580 crtc_state->port_clock = crtc_state->dpll.dot;
1581 /* FIXME this is a mess */
1582 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1583 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1584
1585 return 0;
1586 }
1587
static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
1589 struct intel_crtc *crtc)
1590 {
1591 struct intel_display *display = to_intel_display(state);
1592 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1593 struct intel_crtc_state *crtc_state =
1594 intel_atomic_get_new_crtc_state(state, crtc);
1595 const struct intel_limit *limit;
1596 int refclk = 96000;
1597
1598 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1599 if (intel_panel_use_ssc(display)) {
1600 refclk = dev_priv->display.vbt.lvds_ssc_freq;
1601 drm_dbg_kms(&dev_priv->drm,
1602 "using SSC reference clock of %d kHz\n",
1603 refclk);
1604 }
1605
1606 limit = &pnv_limits_lvds;
1607 } else {
1608 limit = &pnv_limits_sdvo;
1609 }
1610
1611 if (!crtc_state->clock_set &&
1612 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1613 refclk, NULL, &crtc_state->dpll))
1614 return -EINVAL;
1615
1616 pnv_calc_dpll_params(refclk, &crtc_state->dpll);
1617
1618 i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1619 &crtc_state->dpll);
1620
1621 crtc_state->port_clock = crtc_state->dpll.dot;
1622 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1623
1624 return 0;
1625 }
1626
static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
1628 struct intel_crtc *crtc)
1629 {
1630 struct intel_display *display = to_intel_display(state);
1631 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1632 struct intel_crtc_state *crtc_state =
1633 intel_atomic_get_new_crtc_state(state, crtc);
1634 const struct intel_limit *limit;
1635 int refclk = 96000;
1636
1637 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1638 if (intel_panel_use_ssc(display)) {
1639 refclk = dev_priv->display.vbt.lvds_ssc_freq;
1640 drm_dbg_kms(&dev_priv->drm,
1641 "using SSC reference clock of %d kHz\n",
1642 refclk);
1643 }
1644
1645 limit = &intel_limits_i9xx_lvds;
1646 } else {
1647 limit = &intel_limits_i9xx_sdvo;
1648 }
1649
1650 if (!crtc_state->clock_set &&
1651 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1652 refclk, NULL, &crtc_state->dpll))
1653 return -EINVAL;
1654
1655 i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1656
1657 i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1658 &crtc_state->dpll);
1659
1660 crtc_state->port_clock = crtc_state->dpll.dot;
1661 /* FIXME this is a mess */
1662 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1663 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1664
1665 return 0;
1666 }
1667
static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
1669 struct intel_crtc *crtc)
1670 {
1671 struct intel_display *display = to_intel_display(state);
1672 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1673 struct intel_crtc_state *crtc_state =
1674 intel_atomic_get_new_crtc_state(state, crtc);
1675 const struct intel_limit *limit;
1676 int refclk = 48000;
1677
1678 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1679 if (intel_panel_use_ssc(display)) {
1680 refclk = dev_priv->display.vbt.lvds_ssc_freq;
1681 drm_dbg_kms(&dev_priv->drm,
1682 "using SSC reference clock of %d kHz\n",
1683 refclk);
1684 }
1685
1686 limit = &intel_limits_i8xx_lvds;
1687 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
1688 limit = &intel_limits_i8xx_dvo;
1689 } else {
1690 limit = &intel_limits_i8xx_dac;
1691 }
1692
1693 if (!crtc_state->clock_set &&
1694 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1695 refclk, NULL, &crtc_state->dpll))
1696 return -EINVAL;
1697
1698 i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1699
1700 i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
1701 &crtc_state->dpll);
1702
1703 crtc_state->port_clock = crtc_state->dpll.dot;
1704 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1705
1706 return 0;
1707 }
1708
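/* Per-platform clock computation vtables, selected in intel_dpll_init_clock_hook() */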
1709 static const struct intel_dpll_funcs mtl_dpll_funcs = {
1710 .crtc_compute_clock = mtl_crtc_compute_clock,
1711 };
1712
1713 static const struct intel_dpll_funcs dg2_dpll_funcs = {
1714 .crtc_compute_clock = dg2_crtc_compute_clock,
1715 };
1716
1717 static const struct intel_dpll_funcs hsw_dpll_funcs = {
1718 .crtc_compute_clock = hsw_crtc_compute_clock,
1719 .crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
1720 };
1721
1722 static const struct intel_dpll_funcs ilk_dpll_funcs = {
1723 .crtc_compute_clock = ilk_crtc_compute_clock,
1724 .crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
1725 };
1726
1727 static const struct intel_dpll_funcs chv_dpll_funcs = {
1728 .crtc_compute_clock = chv_crtc_compute_clock,
1729 };
1730
1731 static const struct intel_dpll_funcs vlv_dpll_funcs = {
1732 .crtc_compute_clock = vlv_crtc_compute_clock,
1733 };
1734
1735 static const struct intel_dpll_funcs g4x_dpll_funcs = {
1736 .crtc_compute_clock = g4x_crtc_compute_clock,
1737 };
1738
1739 static const struct intel_dpll_funcs pnv_dpll_funcs = {
1740 .crtc_compute_clock = pnv_crtc_compute_clock,
1741 };
1742
1743 static const struct intel_dpll_funcs i9xx_dpll_funcs = {
1744 .crtc_compute_clock = i9xx_crtc_compute_clock,
1745 };
1746
1747 static const struct intel_dpll_funcs i8xx_dpll_funcs = {
1748 .crtc_compute_clock = i8xx_crtc_compute_clock,
1749 };
1750
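/*
 * Compute the DPLL state for @crtc during the atomic check phase. The
 * CRTC must be undergoing a modeset; for a CRTC that is being disabled
 * the DPLL state is simply cleared.
 */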
1751 int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
1752 struct intel_crtc *crtc)
1753 {
1754 struct drm_i915_private *i915 = to_i915(state->base.dev);
1755 struct intel_crtc_state *crtc_state =
1756 intel_atomic_get_new_crtc_state(state, crtc);
1757 int ret;
1758
1759 drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
1760
1761 memset(&crtc_state->dpll_hw_state, 0,
1762 sizeof(crtc_state->dpll_hw_state));
1763
1764 if (!crtc_state->hw.enable)
1765 return 0;
1766
1767 ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
1768 if (ret) {
1769 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
1770 crtc->base.base.id, crtc->base.name);
1771 return ret;
1772 }
1773
1774 return 0;
1775 }
1776
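/*
 * Reserve a shared DPLL for @crtc on platforms that provide a
 * crtc_get_shared_dpll() hook (ILK and HSW+ style shared PLLs). A no-op
 * if the CRTC is being disabled or already has a PLL assigned.
 */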
1777 int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
1778 struct intel_crtc *crtc)
1779 {
1780 struct drm_i915_private *i915 = to_i915(state->base.dev);
1781 struct intel_crtc_state *crtc_state =
1782 intel_atomic_get_new_crtc_state(state, crtc);
1783 int ret;
1784
1785 drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
1786 drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
1787
1788 if (!crtc_state->hw.enable || crtc_state->shared_dpll)
1789 return 0;
1790
1791 if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
1792 return 0;
1793
1794 ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
1795 if (ret) {
1796 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
1797 crtc->base.base.id, crtc->base.name);
1798 return ret;
1799 }
1800
1801 return 0;
1802 }
1803
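/*
 * Select the per-platform DPLL vtable. Checked from newest to oldest
 * platform, so the first match wins.
 */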
1804 void
1805 intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
1806 {
1807 if (DISPLAY_VER(dev_priv) >= 14)
1808 dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
1809 else if (IS_DG2(dev_priv))
1810 dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
1811 else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
1812 dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
1813 else if (HAS_PCH_SPLIT(dev_priv))
1814 dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
1815 else if (IS_CHERRYVIEW(dev_priv))
1816 dev_priv->display.funcs.dpll = &chv_dpll_funcs;
1817 else if (IS_VALLEYVIEW(dev_priv))
1818 dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
1819 else if (IS_G4X(dev_priv))
1820 dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
1821 else if (IS_PINEVIEW(dev_priv))
1822 dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
1823 else if (DISPLAY_VER(dev_priv) != 2)
1824 dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
1825 else
1826 dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
1827 }
1828
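/*
 * Whether the pre-ILK platform has a panel power sequencer: mobile
 * parts and Pineview do, i830 does not.
 */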
1829 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1830 {
1831 if (IS_I830(dev_priv))
1832 return false;
1833
1834 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1835 }
1836
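/*
 * Enable the DPLL on a pre-ILK pipe: program FP0/FP1 and the DPLL
 * register (with an intermediate write that keeps VGA mode enabled),
 * wait for the clocks to stabilize, then latch the pixel multiplier
 * via DPLL_MD (gen4+) or by rewriting DPLL (earlier gens).
 */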
1837 void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
1838 {
1839 struct intel_display *display = to_intel_display(crtc_state);
1840 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1841 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1842 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1843 enum pipe pipe = crtc->pipe;
1844 int i;
1845
1846 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
1847
1848 /* PLL is protected by panel, make sure we can write it */
1849 if (i9xx_has_pps(dev_priv))
1850 assert_pps_unlocked(display, pipe);
1851
1852 intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
1853 intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);
1854
1855 /*
1856 * Apparently we need to have VGA mode enabled prior to changing
1857 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1858 * dividers, even though the register value does change.
1859 */
1860 intel_de_write(dev_priv, DPLL(dev_priv, pipe),
1861 hw_state->dpll & ~DPLL_VGA_MODE_DIS);
1862 intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
1863
1864 /* Wait for the clocks to stabilize. */
1865 intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
1866 udelay(150);
1867
1868 if (DISPLAY_VER(dev_priv) >= 4) {
1869 intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
1870 hw_state->dpll_md);
1871 } else {
1872 /* The pixel multiplier can only be updated once the
1873 * DPLL is enabled and the clocks are stable.
1874 *
1875 * So write it again.
1876 */
1877 intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
1878 }
1879
1880 /* We do this three times for luck */
1881 for (i = 0; i < 3; i++) {
1882 intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
1883 intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
1884 udelay(150); /* wait for warmup */
1885 }
1886 }
1887
1888 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
1889 enum dpio_phy phy, enum dpio_channel ch)
1890 {
1891 u32 tmp;
1892
1893 /*
1894 * PLLB opamp always calibrates to max value of 0x3f, force enable it
1895 * and set it to a reasonable value instead.
1896 */
1897 tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
1898 tmp &= 0xffffff00;
1899 tmp |= 0x00000030;
1900 vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
1901
1902 tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
1903 tmp &= 0x00ffffff;
1904 tmp |= 0x8c000000;
1905 vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
1906
1907 tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
1908 tmp &= 0xffffff00;
1909 vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
1910
1911 tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
1912 tmp &= 0x00ffffff;
1913 tmp |= 0xb0000000;
1914 vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
1915 }
1916
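/*
 * Program the VLV DPIO PHY for this PLL before enabling it: divider
 * values, LPF coefficients and the SSC vs. bend clock source selection,
 * all via sideband writes.
 */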
1917 static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
1918 {
1919 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1920 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1921 const struct dpll *clock = &crtc_state->dpll;
1922 enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
1923 enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
1924 enum pipe pipe = crtc->pipe;
1925 u32 tmp, coreclk;
1926
1927 vlv_dpio_get(dev_priv);
1928
1929 /* See eDP HDMI DPIO driver vbios notes doc */
1930
1931 /* PLL B needs special handling */
1932 if (pipe == PIPE_B)
1933 vlv_pllb_recal_opamp(dev_priv, phy, ch);
1934
1935 /* Set up Tx target for periodic Rcomp update */
1936 vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
1937
1938 /* Disable target IRef on PLL */
1939 tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW16(ch));
1940 tmp &= 0x00ffffff;
1941 vlv_dpio_write(dev_priv, phy, VLV_PLL_DW16(ch), tmp);
1942
1943 /* Disable fast lock */
1944 vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
1945
1946 /* Set idtafcrecal before PLL is enabled */
1947 tmp = DPIO_M1_DIV(clock->m1) |
1948 DPIO_M2_DIV(clock->m2) |
1949 DPIO_P1_DIV(clock->p1) |
1950 DPIO_P2_DIV(clock->p2) |
1951 DPIO_N_DIV(clock->n) |
1952 DPIO_K_DIV(1);
1953
1954 /*
1955 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
1956 * but we don't support that).
1957 * Note: don't use the DAC post divider as it seems unstable.
1958 */
1959 tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
1960 vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
1961
1962 tmp |= DPIO_ENABLE_CALIBRATION;
1963 vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
1964
1965 /* Set HBR and RBR LPF coefficients */
1966 if (crtc_state->port_clock == 162000 ||
1967 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
1968 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1969 vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
1970 0x009f0003);
1971 else
1972 vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
1973 0x00d0000f);
1974
1975 if (intel_crtc_has_dp_encoder(crtc_state)) {
1976 /* Use SSC source */
1977 if (pipe == PIPE_A)
1978 vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1979 0x0df40000);
1980 else
1981 vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1982 0x0df70000);
1983 } else { /* HDMI or VGA */
1984 /* Use bend source */
1985 if (pipe == PIPE_A)
1986 vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1987 0x0df70000);
1988 else
1989 vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1990 0x0df40000);
1991 }
1992
1993 coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(ch));
1994 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
1995 if (intel_crtc_has_dp_encoder(crtc_state))
1996 coreclk |= 0x01000000;
1997 vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(ch), coreclk);
1998
1999 vlv_dpio_write(dev_priv, phy, VLV_PLL_DW19(ch), 0x87871000);
2000
2001 vlv_dpio_put(dev_priv);
2002 }
2003
2004 static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
2005 {
2006 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2007 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2008 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2009 enum pipe pipe = crtc->pipe;
2010
2011 intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
2012 intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
2013 udelay(150);
2014
2015 if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
2016 drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
2017 }
2018
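/*
 * Enable the VLV DPLL: bring up the reference clock first, then, if the
 * state asks for the VCO, program the DPIO PHY and enable the PLL
 * proper, and finally write DPLL_MD.
 */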
2019 void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
2020 {
2021 struct intel_display *display = to_intel_display(crtc_state);
2022 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2023 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2024 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2025 enum pipe pipe = crtc->pipe;
2026
2027 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2028
2029 /* PLL is protected by panel, make sure we can write it */
2030 assert_pps_unlocked(display, pipe);
2031
2032 /* Enable Refclk */
2033 intel_de_write(dev_priv, DPLL(dev_priv, pipe),
2034 hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
2035
2036 if (hw_state->dpll & DPLL_VCO_ENABLE) {
2037 vlv_prepare_pll(crtc_state);
2038 _vlv_enable_pll(crtc_state);
2039 }
2040
2041 intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe), hw_state->dpll_md);
2042 intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
2043 }
2044
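/*
 * Program the CHV DPIO PHY for this PLL: dividers (including the
 * fractional M2), the digital lock detect threshold, and a loop filter
 * chosen by VCO frequency band, followed by an AFC recalibration
 * request.
 */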
2045 static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
2046 {
2047 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2048 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2049 const struct dpll *clock = &crtc_state->dpll;
2050 enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
2051 enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
2052 u32 tmp, loopfilter, tribuf_calcntr;
2053 u32 m2_frac;
2054
2055 m2_frac = clock->m2 & 0x3fffff;
2056
2057 vlv_dpio_get(dev_priv);
2058
2059 /* p1 and p2 divider */
2060 vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(ch),
2061 DPIO_CHV_S1_DIV(5) |
2062 DPIO_CHV_P1_DIV(clock->p1) |
2063 DPIO_CHV_P2_DIV(clock->p2) |
2064 DPIO_CHV_K_DIV(1));
2065
2066 /* Feedback post-divider - m2 */
2067 vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(ch),
2068 DPIO_CHV_M2_DIV(clock->m2 >> 22));
2069
2070 /* Feedback refclk divider - n and m1 */
2071 vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(ch),
2072 DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
2073 DPIO_CHV_N_DIV(1));
2074
2075 /* M2 fraction division */
2076 vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(ch),
2077 DPIO_CHV_M2_FRAC_DIV(m2_frac));
2078
2079 /* M2 fraction division enable */
2080 tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
2081 tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
2082 tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
2083 if (m2_frac)
2084 tmp |= DPIO_CHV_FRAC_DIV_EN;
2085 vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(ch), tmp);
2086
2087 /* Program digital lock detect threshold */
2088 tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(ch));
2089 tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
2090 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
2091 tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
2092 if (!m2_frac)
2093 tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
2094 vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(ch), tmp);
2095
2096 /* Loop filter */
2097 if (clock->vco == 5400000) {
2098 loopfilter = DPIO_CHV_PROP_COEFF(0x3) |
2099 DPIO_CHV_INT_COEFF(0x8) |
2100 DPIO_CHV_GAIN_CTRL(0x1);
2101 tribuf_calcntr = 0x9;
2102 } else if (clock->vco <= 6200000) {
2103 loopfilter = DPIO_CHV_PROP_COEFF(0x5) |
2104 DPIO_CHV_INT_COEFF(0xB) |
2105 DPIO_CHV_GAIN_CTRL(0x3);
2106 tribuf_calcntr = 0x9;
2107 } else if (clock->vco <= 6480000) {
2108 loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
2109 DPIO_CHV_INT_COEFF(0x9) |
2110 DPIO_CHV_GAIN_CTRL(0x3);
2111 tribuf_calcntr = 0x8;
2112 } else {
2113 /* Not supported. Apply the same limits as in the max case */
2114 loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
2115 DPIO_CHV_INT_COEFF(0x9) |
2116 DPIO_CHV_GAIN_CTRL(0x3);
2117 tribuf_calcntr = 0;
2118 }
2119 vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(ch), loopfilter);
2120
2121 tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(ch));
2122 tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
2123 tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
2124 vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(ch), tmp);
2125
2126 /* AFC Recal */
2127 vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch),
2128 vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)) |
2129 DPIO_AFC_RECAL);
2130
2131 vlv_dpio_put(dev_priv);
2132 }
2133
2134 static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
2135 {
2136 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2137 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2138 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2139 enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
2140 enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
2141 enum pipe pipe = crtc->pipe;
2142 u32 tmp;
2143
2144 vlv_dpio_get(dev_priv);
2145
2146 /* Re-enable the 10bit clock to the display controller */
2147 tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
2148 tmp |= DPIO_DCLKP_EN;
2149 vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), tmp);
2150
2151 vlv_dpio_put(dev_priv);
2152
2153 /*
2154 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
2155 */
2156 udelay(1);
2157
2158 /* Enable PLL */
2159 intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
2160
2161 /* Check PLL is locked */
2162 if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
2163 drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
2164 }
2165
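/*
 * Enable the CHV DPLL. Note the WaPixelRepeatModeFixForC0 handling
 * below: DPLLCMD does not exist, so for pipes B and C the MD value is
 * written through DPLLBMD with the help of the CBR4 chicken bits.
 */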
2166 void chv_enable_pll(const struct intel_crtc_state *crtc_state)
2167 {
2168 struct intel_display *display = to_intel_display(crtc_state);
2169 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2170 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2171 const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2172 enum pipe pipe = crtc->pipe;
2173
2174 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2175
2176 /* PLL is protected by panel, make sure we can write it */
2177 assert_pps_unlocked(display, pipe);
2178
2179 /* Enable Refclk and SSC */
2180 intel_de_write(dev_priv, DPLL(dev_priv, pipe),
2181 hw_state->dpll & ~DPLL_VCO_ENABLE);
2182
2183 if (hw_state->dpll & DPLL_VCO_ENABLE) {
2184 chv_prepare_pll(crtc_state);
2185 _chv_enable_pll(crtc_state);
2186 }
2187
2188 if (pipe != PIPE_A) {
2189 /*
2190 * WaPixelRepeatModeFixForC0:chv
2191 *
2192 * DPLLCMD is AWOL. Use chicken bits to propagate
2193 * the value from DPLLBMD to either pipe B or C.
2194 */
2195 intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
2196 intel_de_write(dev_priv, DPLL_MD(dev_priv, PIPE_B),
2197 hw_state->dpll_md);
2198 intel_de_write(dev_priv, CBR4_VLV, 0);
2199 dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md;
2200
2201 /*
2202 * DPLLB VGA mode also seems to cause problems.
2203 * We should always have it disabled.
2204 */
2205 drm_WARN_ON(&dev_priv->drm,
2206 (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) &
2207 DPLL_VGA_MODE_DIS) == 0);
2208 } else {
2209 intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
2210 hw_state->dpll_md);
2211 intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
2212 }
2213 }
2214
2215 /**
2216 * vlv_force_pll_on - forcibly enable just the PLL
2217 * @dev_priv: i915 private structure
2218 * @pipe: pipe PLL to enable
2219 * @dpll: PLL configuration
2220 *
2221 * Enable the PLL for @pipe using the supplied @dpll config. To be used
2222 * in cases where we need the PLL enabled even when @pipe is not going to
2223 * be enabled.
2224 */
2225 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
2226 const struct dpll *dpll)
2227 {
2228 struct intel_display *display = &dev_priv->display;
2229 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
2230 struct intel_crtc_state *crtc_state;
2231
2232 crtc_state = intel_crtc_state_alloc(crtc);
2233 if (!crtc_state)
2234 return -ENOMEM;
2235
2236 crtc_state->cpu_transcoder = (enum transcoder)pipe;
2237 crtc_state->pixel_multiplier = 1;
2238 crtc_state->dpll = *dpll;
2239 crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
2240
2241 if (IS_CHERRYVIEW(dev_priv)) {
2242 chv_compute_dpll(crtc_state);
2243 chv_enable_pll(crtc_state);
2244 } else {
2245 vlv_compute_dpll(crtc_state);
2246 vlv_enable_pll(crtc_state);
2247 }
2248
2249 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
2250
2251 return 0;
2252 }
2253
2254 void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
2255 {
2256 u32 val;
2257
2258 /* Make sure the pipe isn't still relying on us */
2259 assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
2260
2261 val = DPLL_INTEGRATED_REF_CLK_VLV |
2262 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2263 if (pipe != PIPE_A)
2264 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2265
2266 intel_de_write(dev_priv, DPLL(dev_priv, pipe), val);
2267 intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
2268 }
2269
2270 void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
2271 {
2272 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
2273 enum dpio_phy phy = vlv_pipe_to_phy(pipe);
2274 u32 val;
2275
2276 /* Make sure the pipe isn't still relying on us */
2277 assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
2278
2279 val = DPLL_SSC_REF_CLK_CHV |
2280 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2281 if (pipe != PIPE_A)
2282 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2283
2284 intel_de_write(dev_priv, DPLL(dev_priv, pipe), val);
2285 intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
2286
2287 vlv_dpio_get(dev_priv);
2288
2289 /* Disable 10bit clock to display controller */
2290 val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
2291 val &= ~DPIO_DCLKP_EN;
2292 vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), val);
2293
2294 vlv_dpio_put(dev_priv);
2295 }
2296
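/*
 * Disable the DPLL on a pre-ILK pipe. Skipped on i830, where the PLLs
 * are left enabled at all times.
 */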
2297 void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
2298 {
2299 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2300 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2301 enum pipe pipe = crtc->pipe;
2302
2303 /* On i830 the DPLLs must remain enabled, so don't touch them */
2304 if (IS_I830(dev_priv))
2305 return;
2306
2307 /* Make sure the pipe isn't still relying on us */
2308 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2309
2310 intel_de_write(dev_priv, DPLL(dev_priv, pipe), DPLL_VGA_MODE_DIS);
2311 intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
2312 }
2313
2314
2315 /**
2316 * vlv_force_pll_off - forcibly disable just the PLL
2317 * @dev_priv: i915 private structure
2318 * @pipe: pipe PLL to disable
2319 *
2320 * Disable the PLL for @pipe, e.g. to undo a previous vlv_force_pll_on()
2321 * where the PLL was enabled without enabling @pipe itself.
2322 */
2323 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
2324 {
2325 if (IS_CHERRYVIEW(dev_priv))
2326 chv_disable_pll(dev_priv, pipe);
2327 else
2328 vlv_disable_pll(dev_priv, pipe);
2329 }
2330
2331 /* Only for pre-ILK configs */
2332 static void assert_pll(struct drm_i915_private *dev_priv,
2333 enum pipe pipe, bool state)
2334 {
2335 struct intel_display *display = &dev_priv->display;
2336 bool cur_state;
2337
2338 cur_state = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
2339 INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
2340 "PLL state assertion failure (expected %s, current %s)\n",
2341 str_on_off(state), str_on_off(cur_state));
2342 }
2343
2344 void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
2345 {
2346 assert_pll(i915, pipe, true);
2347 }
2348
2349 void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
2350 {
2351 assert_pll(i915, pipe, false);
2352 }
2353