1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/string_helpers.h>
8 
9 #include "i915_reg.h"
10 #include "intel_atomic.h"
11 #include "intel_crtc.h"
12 #include "intel_cx0_phy.h"
13 #include "intel_de.h"
14 #include "intel_display.h"
15 #include "intel_display_types.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_dpll.h"
18 #include "intel_lvds.h"
19 #include "intel_lvds_regs.h"
20 #include "intel_panel.h"
21 #include "intel_pps.h"
22 #include "intel_snps_phy.h"
23 #include "vlv_dpio_phy_regs.h"
24 #include "vlv_sideband.h"
25 
26 struct intel_dpll_funcs {
27 	int (*crtc_compute_clock)(struct intel_atomic_state *state,
28 				  struct intel_crtc *crtc);
29 	int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
30 				    struct intel_crtc *crtc);
31 };
32 
33 struct intel_limit {
34 	struct {
35 		int min, max;
36 	} dot, vco, n, m, m1, m2, p, p1;
37 
38 	struct {
39 		int dot_limit;
40 		int p2_slow, p2_fast;
41 	} p2;
42 };
43 static const struct intel_limit intel_limits_i8xx_dac = {
44 	.dot = { .min = 25000, .max = 350000 },
45 	.vco = { .min = 908000, .max = 1512000 },
46 	.n = { .min = 2, .max = 16 },
47 	.m = { .min = 96, .max = 140 },
48 	.m1 = { .min = 18, .max = 26 },
49 	.m2 = { .min = 6, .max = 16 },
50 	.p = { .min = 4, .max = 128 },
51 	.p1 = { .min = 2, .max = 33 },
52 	.p2 = { .dot_limit = 165000,
53 		.p2_slow = 4, .p2_fast = 2 },
54 };
55 
56 static const struct intel_limit intel_limits_i8xx_dvo = {
57 	.dot = { .min = 25000, .max = 350000 },
58 	.vco = { .min = 908000, .max = 1512000 },
59 	.n = { .min = 2, .max = 16 },
60 	.m = { .min = 96, .max = 140 },
61 	.m1 = { .min = 18, .max = 26 },
62 	.m2 = { .min = 6, .max = 16 },
63 	.p = { .min = 4, .max = 128 },
64 	.p1 = { .min = 2, .max = 33 },
65 	.p2 = { .dot_limit = 165000,
66 		.p2_slow = 4, .p2_fast = 4 },
67 };
68 
69 static const struct intel_limit intel_limits_i8xx_lvds = {
70 	.dot = { .min = 25000, .max = 350000 },
71 	.vco = { .min = 908000, .max = 1512000 },
72 	.n = { .min = 2, .max = 16 },
73 	.m = { .min = 96, .max = 140 },
74 	.m1 = { .min = 18, .max = 26 },
75 	.m2 = { .min = 6, .max = 16 },
76 	.p = { .min = 4, .max = 128 },
77 	.p1 = { .min = 1, .max = 6 },
78 	.p2 = { .dot_limit = 165000,
79 		.p2_slow = 14, .p2_fast = 7 },
80 };
81 
82 static const struct intel_limit intel_limits_i9xx_sdvo = {
83 	.dot = { .min = 20000, .max = 400000 },
84 	.vco = { .min = 1400000, .max = 2800000 },
85 	.n = { .min = 1, .max = 6 },
86 	.m = { .min = 70, .max = 120 },
87 	.m1 = { .min = 8, .max = 18 },
88 	.m2 = { .min = 3, .max = 7 },
89 	.p = { .min = 5, .max = 80 },
90 	.p1 = { .min = 1, .max = 8 },
91 	.p2 = { .dot_limit = 200000,
92 		.p2_slow = 10, .p2_fast = 5 },
93 };
94 
95 static const struct intel_limit intel_limits_i9xx_lvds = {
96 	.dot = { .min = 20000, .max = 400000 },
97 	.vco = { .min = 1400000, .max = 2800000 },
98 	.n = { .min = 1, .max = 6 },
99 	.m = { .min = 70, .max = 120 },
100 	.m1 = { .min = 8, .max = 18 },
101 	.m2 = { .min = 3, .max = 7 },
102 	.p = { .min = 7, .max = 98 },
103 	.p1 = { .min = 1, .max = 8 },
104 	.p2 = { .dot_limit = 112000,
105 		.p2_slow = 14, .p2_fast = 7 },
106 };
107 
108 
109 static const struct intel_limit intel_limits_g4x_sdvo = {
110 	.dot = { .min = 25000, .max = 270000 },
111 	.vco = { .min = 1750000, .max = 3500000},
112 	.n = { .min = 1, .max = 4 },
113 	.m = { .min = 104, .max = 138 },
114 	.m1 = { .min = 17, .max = 23 },
115 	.m2 = { .min = 5, .max = 11 },
116 	.p = { .min = 10, .max = 30 },
117 	.p1 = { .min = 1, .max = 3},
118 	.p2 = { .dot_limit = 270000,
119 		.p2_slow = 10,
120 		.p2_fast = 10
121 	},
122 };
123 
124 static const struct intel_limit intel_limits_g4x_hdmi = {
125 	.dot = { .min = 22000, .max = 400000 },
126 	.vco = { .min = 1750000, .max = 3500000},
127 	.n = { .min = 1, .max = 4 },
128 	.m = { .min = 104, .max = 138 },
129 	.m1 = { .min = 16, .max = 23 },
130 	.m2 = { .min = 5, .max = 11 },
131 	.p = { .min = 5, .max = 80 },
132 	.p1 = { .min = 1, .max = 8},
133 	.p2 = { .dot_limit = 165000,
134 		.p2_slow = 10, .p2_fast = 5 },
135 };
136 
137 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
138 	.dot = { .min = 20000, .max = 115000 },
139 	.vco = { .min = 1750000, .max = 3500000 },
140 	.n = { .min = 1, .max = 3 },
141 	.m = { .min = 104, .max = 138 },
142 	.m1 = { .min = 17, .max = 23 },
143 	.m2 = { .min = 5, .max = 11 },
144 	.p = { .min = 28, .max = 112 },
145 	.p1 = { .min = 2, .max = 8 },
146 	.p2 = { .dot_limit = 0,
147 		.p2_slow = 14, .p2_fast = 14
148 	},
149 };
150 
151 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
152 	.dot = { .min = 80000, .max = 224000 },
153 	.vco = { .min = 1750000, .max = 3500000 },
154 	.n = { .min = 1, .max = 3 },
155 	.m = { .min = 104, .max = 138 },
156 	.m1 = { .min = 17, .max = 23 },
157 	.m2 = { .min = 5, .max = 11 },
158 	.p = { .min = 14, .max = 42 },
159 	.p1 = { .min = 2, .max = 6 },
160 	.p2 = { .dot_limit = 0,
161 		.p2_slow = 7, .p2_fast = 7
162 	},
163 };
164 
165 static const struct intel_limit pnv_limits_sdvo = {
166 	.dot = { .min = 20000, .max = 400000},
167 	.vco = { .min = 1700000, .max = 3500000 },
168 	/* Pineview's N counter is a ring counter */
169 	.n = { .min = 3, .max = 6 },
170 	.m = { .min = 2, .max = 256 },
171 	/* Pineview only has one combined m divider, which we treat as m2. */
172 	.m1 = { .min = 0, .max = 0 },
173 	.m2 = { .min = 0, .max = 254 },
174 	.p = { .min = 5, .max = 80 },
175 	.p1 = { .min = 1, .max = 8 },
176 	.p2 = { .dot_limit = 200000,
177 		.p2_slow = 10, .p2_fast = 5 },
178 };
179 
180 static const struct intel_limit pnv_limits_lvds = {
181 	.dot = { .min = 20000, .max = 400000 },
182 	.vco = { .min = 1700000, .max = 3500000 },
183 	.n = { .min = 3, .max = 6 },
184 	.m = { .min = 2, .max = 256 },
185 	.m1 = { .min = 0, .max = 0 },
186 	.m2 = { .min = 0, .max = 254 },
187 	.p = { .min = 7, .max = 112 },
188 	.p1 = { .min = 1, .max = 8 },
189 	.p2 = { .dot_limit = 112000,
190 		.p2_slow = 14, .p2_fast = 14 },
191 };
192 
193 /* Ironlake / Sandybridge
194  *
195  * We calculate clock using (register_value + 2) for N/M1/M2, so here
196  * the range value for them is (actual_value - 2).
197  */
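/*
 * Illustrative example (not from Bspec): .n = 1 in the tables below thus
 * corresponds to an actual N divider of 3, since i9xx_calc_dpll_params()
 * computes the VCO frequency using (n + 2).
 */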
198 static const struct intel_limit ilk_limits_dac = {
199 	.dot = { .min = 25000, .max = 350000 },
200 	.vco = { .min = 1760000, .max = 3510000 },
201 	.n = { .min = 1, .max = 5 },
202 	.m = { .min = 79, .max = 127 },
203 	.m1 = { .min = 12, .max = 22 },
204 	.m2 = { .min = 5, .max = 9 },
205 	.p = { .min = 5, .max = 80 },
206 	.p1 = { .min = 1, .max = 8 },
207 	.p2 = { .dot_limit = 225000,
208 		.p2_slow = 10, .p2_fast = 5 },
209 };
210 
211 static const struct intel_limit ilk_limits_single_lvds = {
212 	.dot = { .min = 25000, .max = 350000 },
213 	.vco = { .min = 1760000, .max = 3510000 },
214 	.n = { .min = 1, .max = 3 },
215 	.m = { .min = 79, .max = 118 },
216 	.m1 = { .min = 12, .max = 22 },
217 	.m2 = { .min = 5, .max = 9 },
218 	.p = { .min = 28, .max = 112 },
219 	.p1 = { .min = 2, .max = 8 },
220 	.p2 = { .dot_limit = 225000,
221 		.p2_slow = 14, .p2_fast = 14 },
222 };
223 
224 static const struct intel_limit ilk_limits_dual_lvds = {
225 	.dot = { .min = 25000, .max = 350000 },
226 	.vco = { .min = 1760000, .max = 3510000 },
227 	.n = { .min = 1, .max = 3 },
228 	.m = { .min = 79, .max = 127 },
229 	.m1 = { .min = 12, .max = 22 },
230 	.m2 = { .min = 5, .max = 9 },
231 	.p = { .min = 14, .max = 56 },
232 	.p1 = { .min = 2, .max = 8 },
233 	.p2 = { .dot_limit = 225000,
234 		.p2_slow = 7, .p2_fast = 7 },
235 };
236 
237 /* LVDS 100 MHz refclk limits. */
238 static const struct intel_limit ilk_limits_single_lvds_100m = {
239 	.dot = { .min = 25000, .max = 350000 },
240 	.vco = { .min = 1760000, .max = 3510000 },
241 	.n = { .min = 1, .max = 2 },
242 	.m = { .min = 79, .max = 126 },
243 	.m1 = { .min = 12, .max = 22 },
244 	.m2 = { .min = 5, .max = 9 },
245 	.p = { .min = 28, .max = 112 },
246 	.p1 = { .min = 2, .max = 8 },
247 	.p2 = { .dot_limit = 225000,
248 		.p2_slow = 14, .p2_fast = 14 },
249 };
250 
251 static const struct intel_limit ilk_limits_dual_lvds_100m = {
252 	.dot = { .min = 25000, .max = 350000 },
253 	.vco = { .min = 1760000, .max = 3510000 },
254 	.n = { .min = 1, .max = 3 },
255 	.m = { .min = 79, .max = 126 },
256 	.m1 = { .min = 12, .max = 22 },
257 	.m2 = { .min = 5, .max = 9 },
258 	.p = { .min = 14, .max = 42 },
259 	.p1 = { .min = 2, .max = 6 },
260 	.p2 = { .dot_limit = 225000,
261 		.p2_slow = 7, .p2_fast = 7 },
262 };
263 
264 static const struct intel_limit intel_limits_vlv = {
265 	 /*
266 	  * These are based on the data rate limits (measured in fast clocks)
267 	  * since those are the strictest limits we have. The fast
268 	  * clock and actual rate limits are more relaxed, so checking
269 	  * them would make no difference.
270 	  */
271 	.dot = { .min = 25000, .max = 270000 },
272 	.vco = { .min = 4000000, .max = 6000000 },
273 	.n = { .min = 1, .max = 7 },
274 	.m1 = { .min = 2, .max = 3 },
275 	.m2 = { .min = 11, .max = 156 },
276 	.p1 = { .min = 2, .max = 3 },
277 	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
278 };
279 
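/*
 * Note: on CHV the m2 divider is a fixed-point value with 22 fractional
 * bits, hence the << 22 in the limits below and the matching n << 22
 * divide in chv_calc_dpll_params().
 */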
280 static const struct intel_limit intel_limits_chv = {
281 	/*
282 	 * These are based on the data rate limits (measured in fast clocks)
283 	 * since those are the strictest limits we have.  The fast
284 	 * clock and actual rate limits are more relaxed, so checking
285 	 * them would make no difference.
286 	 */
287 	.dot = { .min = 25000, .max = 540000 },
288 	.vco = { .min = 4800000, .max = 6480000 },
289 	.n = { .min = 1, .max = 1 },
290 	.m1 = { .min = 2, .max = 2 },
291 	.m2 = { .min = 24 << 22, .max = 175 << 22 },
292 	.p1 = { .min = 2, .max = 4 },
293 	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
294 };
295 
296 static const struct intel_limit intel_limits_bxt = {
297 	.dot = { .min = 25000, .max = 594000 },
298 	.vco = { .min = 4800000, .max = 6700000 },
299 	.n = { .min = 1, .max = 1 },
300 	.m1 = { .min = 2, .max = 2 },
301 	/* FIXME: find real m2 limits */
302 	.m2 = { .min = 2 << 22, .max = 255 << 22 },
303 	.p1 = { .min = 2, .max = 4 },
304 	.p2 = { .p2_slow = 1, .p2_fast = 20 },
305 };
306 
307 /*
308  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
309  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
310  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
311  * The helpers' return value is the rate of the clock that is fed to the
312  * display engine's pipe which can be the above fast dot clock rate or a
313  * divided-down version of it.
314  */
315 /* m1 is reserved as 0 in Pineview, n is a ring counter */
316 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
317 {
318 	clock->m = clock->m2 + 2;
319 	clock->p = clock->p1 * clock->p2;
320 
321 	clock->vco = clock->n == 0 ? 0 :
322 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
323 	clock->dot = clock->p == 0 ? 0 :
324 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
325 
326 	return clock->dot;
327 }
328 
329 static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
330 {
331 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
332 }
333 
334 int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
335 {
336 	clock->m = i9xx_dpll_compute_m(clock);
337 	clock->p = clock->p1 * clock->p2;
338 
339 	clock->vco = clock->n + 2 == 0 ? 0 :
340 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
341 	clock->dot = clock->p == 0 ? 0 :
342 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
343 
344 	return clock->dot;
345 }
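/*
 * Worked example for the i9xx math above (illustrative values only):
 * refclk = 96000 kHz, m1 = 11, m2 = 7, n = 2, p1 = 2, p2 = 10 gives
 * m = 5 * (11 + 2) + (7 + 2) = 74, p = 2 * 10 = 20,
 * vco = 96000 * 74 / (2 + 2) = 1776000 kHz,
 * dot = 1776000 / 20 = 88800 kHz.
 */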
346 
347 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
348 {
349 	clock->m = clock->m1 * clock->m2;
350 	clock->p = clock->p1 * clock->p2 * 5;
351 
352 	clock->vco = clock->n == 0 ? 0 :
353 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
354 	clock->dot = clock->p == 0 ? 0 :
355 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
356 
357 	return clock->dot;
358 }
359 
360 int chv_calc_dpll_params(int refclk, struct dpll *clock)
361 {
362 	clock->m = clock->m1 * clock->m2;
363 	clock->p = clock->p1 * clock->p2 * 5;
364 
365 	clock->vco = clock->n == 0 ? 0 :
366 		DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), clock->n << 22);
367 	clock->dot = clock->p == 0 ? 0 :
368 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
369 
370 	return clock->dot;
371 }
372 
373 static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
374 {
375 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
376 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
377 
378 	if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
379 		return i915->display.vbt.lvds_ssc_freq;
380 	else if (HAS_PCH_SPLIT(i915))
381 		return 120000;
382 	else if (DISPLAY_VER(i915) != 2)
383 		return 96000;
384 	else
385 		return 48000;
386 }
387 
388 void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
389 			    struct intel_dpll_hw_state *dpll_hw_state)
390 {
391 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
392 	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
393 
394 	if (DISPLAY_VER(dev_priv) >= 4) {
395 		u32 tmp;
396 
397 		/* No way to read it out on pipes B and C */
398 		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
399 			tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
400 		else
401 			tmp = intel_de_read(dev_priv,
402 					    DPLL_MD(dev_priv, crtc->pipe));
403 
404 		hw_state->dpll_md = tmp;
405 	}
406 
407 	hw_state->dpll = intel_de_read(dev_priv, DPLL(dev_priv, crtc->pipe));
408 
409 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
410 		hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe));
411 		hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe));
412 	} else {
413 		/* Mask out read-only status bits. */
414 		hw_state->dpll &= ~(DPLL_LOCK_VLV |
415 				    DPLL_PORTC_READY_MASK |
416 				    DPLL_PORTB_READY_MASK);
417 	}
418 }
419 
420 /* Returns the clock of the currently programmed mode of the given pipe. */
421 void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
422 {
423 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
424 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
425 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
426 	u32 dpll = hw_state->dpll;
427 	u32 fp;
428 	struct dpll clock;
429 	int port_clock;
430 	int refclk = i9xx_pll_refclk(crtc_state);
431 
432 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
433 		fp = hw_state->fp0;
434 	else
435 		fp = hw_state->fp1;
436 
437 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
438 	if (IS_PINEVIEW(dev_priv)) {
439 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
440 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
441 	} else {
442 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
443 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
444 	}
445 
446 	if (DISPLAY_VER(dev_priv) != 2) {
447 		if (IS_PINEVIEW(dev_priv))
448 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
449 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
450 		else
451 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
452 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
453 
454 		switch (dpll & DPLL_MODE_MASK) {
455 		case DPLLB_MODE_DAC_SERIAL:
456 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
457 				5 : 10;
458 			break;
459 		case DPLLB_MODE_LVDS:
460 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
461 				7 : 14;
462 			break;
463 		default:
464 			drm_dbg_kms(&dev_priv->drm,
465 				    "Unknown DPLL mode %08x in programmed "
466 				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
467 			return;
468 		}
469 
470 		if (IS_PINEVIEW(dev_priv))
471 			port_clock = pnv_calc_dpll_params(refclk, &clock);
472 		else
473 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
474 	} else {
475 		enum pipe lvds_pipe;
476 
477 		if (IS_I85X(dev_priv) &&
478 		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
479 		    lvds_pipe == crtc->pipe) {
480 			u32 lvds = intel_de_read(dev_priv, LVDS);
481 
482 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
483 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
484 
485 			if (lvds & LVDS_CLKB_POWER_UP)
486 				clock.p2 = 7;
487 			else
488 				clock.p2 = 14;
489 		} else {
490 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
491 				clock.p1 = 2;
492 			else {
493 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
494 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
495 			}
496 			if (dpll & PLL_P2_DIVIDE_BY_4)
497 				clock.p2 = 4;
498 			else
499 				clock.p2 = 2;
500 		}
501 
502 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
503 	}
504 
505 	/*
506 	 * This value includes pixel_multiplier. We will use
507 	 * port_clock to compute adjusted_mode.crtc_clock in the
508 	 * encoder's get_config() function.
509 	 */
510 	crtc_state->port_clock = port_clock;
511 }
512 
513 void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
514 {
515 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
516 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
517 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
518 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
519 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
520 	int refclk = 100000;
521 	struct dpll clock;
522 	u32 tmp;
523 
524 	/* In case of DSI, DPLL will not be used */
525 	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
526 		return;
527 
528 	vlv_dpio_get(dev_priv);
529 	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(ch));
530 	vlv_dpio_put(dev_priv);
531 
532 	clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
533 	clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
534 	clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp);
535 	clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp);
536 	clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp);
537 
538 	crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock);
539 }
540 
541 void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
542 {
543 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
544 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
545 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
546 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
547 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
548 	struct dpll clock;
549 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
550 	int refclk = 100000;
551 
552 	/* In case of DSI, DPLL will not be used */
553 	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
554 		return;
555 
556 	vlv_dpio_get(dev_priv);
557 	cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(ch));
558 	pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(ch));
559 	pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(ch));
560 	pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(ch));
561 	pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
562 	vlv_dpio_put(dev_priv);
563 
564 	clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
565 	clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
566 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
567 		clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2);
568 	clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1);
569 	clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13);
570 	clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13);
571 
572 	crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock);
573 }
574 
575 /*
576  * Returns whether the given set of divisors are valid for a given refclk with
577  * the given connectors.
578  */
579 static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
580 			       const struct intel_limit *limit,
581 			       const struct dpll *clock)
582 {
583 	if (clock->n < limit->n.min || limit->n.max < clock->n)
584 		return false;
585 	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
586 		return false;
587 	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
588 		return false;
589 	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
590 		return false;
591 
592 	if (!IS_PINEVIEW(dev_priv) && !IS_LP(dev_priv))
593 		if (clock->m1 <= clock->m2)
594 			return false;
595 
596 	if (!IS_LP(dev_priv)) {
597 		if (clock->p < limit->p.min || limit->p.max < clock->p)
598 			return false;
599 		if (clock->m < limit->m.min || limit->m.max < clock->m)
600 			return false;
601 	}
602 
603 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
604 		return false;
605 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
606 	 * connector, etc., rather than just a single range.
607 	 */
608 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
609 		return false;
610 
611 	return true;
612 }
613 
614 static int
615 i9xx_select_p2_div(const struct intel_limit *limit,
616 		   const struct intel_crtc_state *crtc_state,
617 		   int target)
618 {
619 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
620 
621 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
622 		/*
623 		 * For LVDS just rely on its current settings for dual-channel.
624 		 * We haven't figured out how to reliably set up different
625 		 * single/dual channel state, if we even can.
626 		 */
627 		if (intel_is_dual_link_lvds(dev_priv))
628 			return limit->p2.p2_fast;
629 		else
630 			return limit->p2.p2_slow;
631 	} else {
632 		if (target < limit->p2.dot_limit)
633 			return limit->p2.p2_slow;
634 		else
635 			return limit->p2.p2_fast;
636 	}
637 }
638 
639 /*
640  * Returns a set of divisors for the desired target clock with the given
641  * refclk, or FALSE if no suitable divisors could be found.
642  *
643  * Target and reference clocks are specified in kHz.
644  *
645  * If match_clock is provided, then best_clock P divider must match the P
646  * divider from @match_clock used for LVDS downclocking.
647  */
648 static bool
649 i9xx_find_best_dpll(const struct intel_limit *limit,
650 		    struct intel_crtc_state *crtc_state,
651 		    int target, int refclk,
652 		    const struct dpll *match_clock,
653 		    struct dpll *best_clock)
654 {
655 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
656 	struct dpll clock;
657 	int err = target;
658 
659 	memset(best_clock, 0, sizeof(*best_clock));
660 
661 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
662 
663 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
664 	     clock.m1++) {
665 		for (clock.m2 = limit->m2.min;
666 		     clock.m2 <= limit->m2.max; clock.m2++) {
667 			if (clock.m2 >= clock.m1)
668 				break;
669 			for (clock.n = limit->n.min;
670 			     clock.n <= limit->n.max; clock.n++) {
671 				for (clock.p1 = limit->p1.min;
672 					clock.p1 <= limit->p1.max; clock.p1++) {
673 					int this_err;
674 
675 					i9xx_calc_dpll_params(refclk, &clock);
676 					if (!intel_pll_is_valid(to_i915(dev),
677 								limit,
678 								&clock))
679 						continue;
680 					if (match_clock &&
681 					    clock.p != match_clock->p)
682 						continue;
683 
684 					this_err = abs(clock.dot - target);
685 					if (this_err < err) {
686 						*best_clock = clock;
687 						err = this_err;
688 					}
689 				}
690 			}
691 		}
692 	}
693 
694 	return (err != target);
695 }
696 
697 /*
698  * Returns a set of divisors for the desired target clock with the given
699  * refclk, or FALSE if no suitable divisors could be found.
700  *
701  * Target and reference clocks are specified in kHz.
702  *
703  * If match_clock is provided, then best_clock P divider must match the P
704  * divider from @match_clock used for LVDS downclocking.
705  */
706 static bool
707 pnv_find_best_dpll(const struct intel_limit *limit,
708 		   struct intel_crtc_state *crtc_state,
709 		   int target, int refclk,
710 		   const struct dpll *match_clock,
711 		   struct dpll *best_clock)
712 {
713 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
714 	struct dpll clock;
715 	int err = target;
716 
717 	memset(best_clock, 0, sizeof(*best_clock));
718 
719 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
720 
721 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
722 	     clock.m1++) {
723 		for (clock.m2 = limit->m2.min;
724 		     clock.m2 <= limit->m2.max; clock.m2++) {
725 			for (clock.n = limit->n.min;
726 			     clock.n <= limit->n.max; clock.n++) {
727 				for (clock.p1 = limit->p1.min;
728 					clock.p1 <= limit->p1.max; clock.p1++) {
729 					int this_err;
730 
731 					pnv_calc_dpll_params(refclk, &clock);
732 					if (!intel_pll_is_valid(to_i915(dev),
733 								limit,
734 								&clock))
735 						continue;
736 					if (match_clock &&
737 					    clock.p != match_clock->p)
738 						continue;
739 
740 					this_err = abs(clock.dot - target);
741 					if (this_err < err) {
742 						*best_clock = clock;
743 						err = this_err;
744 					}
745 				}
746 			}
747 		}
748 	}
749 
750 	return (err != target);
751 }
752 
753 /*
754  * Returns a set of divisors for the desired target clock with the given
755  * refclk, or FALSE if no suitable divisors could be found.
756  *
757  * Target and reference clocks are specified in kHz.
758  *
759  * If match_clock is provided, then best_clock P divider must match the P
760  * divider from @match_clock used for LVDS downclocking.
761  */
762 static bool
763 g4x_find_best_dpll(const struct intel_limit *limit,
764 		   struct intel_crtc_state *crtc_state,
765 		   int target, int refclk,
766 		   const struct dpll *match_clock,
767 		   struct dpll *best_clock)
768 {
769 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
770 	struct dpll clock;
771 	int max_n;
772 	bool found = false;
773 	/* approximately equals target * 0.00585 */
774 	int err_most = (target >> 8) + (target >> 9);
775 
776 	memset(best_clock, 0, sizeof(*best_clock));
777 
778 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
779 
780 	max_n = limit->n.max;
781 	/* based on hardware requirement, prefer smaller n for better precision */
782 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
783 		/* based on hardware requirement, prefer larger m1,m2 */
784 		for (clock.m1 = limit->m1.max;
785 		     clock.m1 >= limit->m1.min; clock.m1--) {
786 			for (clock.m2 = limit->m2.max;
787 			     clock.m2 >= limit->m2.min; clock.m2--) {
788 				for (clock.p1 = limit->p1.max;
789 				     clock.p1 >= limit->p1.min; clock.p1--) {
790 					int this_err;
791 
792 					i9xx_calc_dpll_params(refclk, &clock);
793 					if (!intel_pll_is_valid(to_i915(dev),
794 								limit,
795 								&clock))
796 						continue;
797 
798 					this_err = abs(clock.dot - target);
799 					if (this_err < err_most) {
800 						*best_clock = clock;
801 						err_most = this_err;
802 						max_n = clock.n;
803 						found = true;
804 					}
805 				}
806 			}
807 		}
808 	}
809 	return found;
810 }
811 
812 /*
813  * Check whether the calculated PLL configuration is more optimal than the best
814  * one found so far. The calculated error is returned via @error_ppm.
815  */
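/*
 * Illustrative example (made-up numbers): target_freq = 270000 kHz and
 * calculated_clock->dot = 270027 kHz give an error of
 * 1000000 * 27 / 270000 = 100 ppm.
 */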
816 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
817 			       const struct dpll *calculated_clock,
818 			       const struct dpll *best_clock,
819 			       unsigned int best_error_ppm,
820 			       unsigned int *error_ppm)
821 {
822 	/*
823 	 * For CHV ignore the error and consider only the P value.
824 	 * Prefer a bigger P value based on HW requirements.
825 	 */
826 	if (IS_CHERRYVIEW(to_i915(dev))) {
827 		*error_ppm = 0;
828 
829 		return calculated_clock->p > best_clock->p;
830 	}
831 
832 	if (drm_WARN_ON_ONCE(dev, !target_freq))
833 		return false;
834 
835 	*error_ppm = div_u64(1000000ULL *
836 				abs(target_freq - calculated_clock->dot),
837 			     target_freq);
838 	/*
839 	 * Prefer a better P value over a better (smaller) error if the error
840 	 * is small. Ensure this preference for future configurations too by
841 	 * setting the error to 0.
842 	 */
843 	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
844 		*error_ppm = 0;
845 
846 		return true;
847 	}
848 
849 	return *error_ppm + 10 < best_error_ppm;
850 }
851 
852 /*
853  * Returns a set of divisors for the desired target clock with the given
854  * refclk, or FALSE if no suitable divisors could be found.
855  */
856 static bool
857 vlv_find_best_dpll(const struct intel_limit *limit,
858 		   struct intel_crtc_state *crtc_state,
859 		   int target, int refclk,
860 		   const struct dpll *match_clock,
861 		   struct dpll *best_clock)
862 {
863 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
864 	struct drm_device *dev = crtc->base.dev;
865 	struct dpll clock;
866 	unsigned int bestppm = 1000000;
867 	/* the minimum PLL update rate (refclk / n) is 19.2 MHz */
868 	int max_n = min(limit->n.max, refclk / 19200);
869 	bool found = false;
870 
871 	memset(best_clock, 0, sizeof(*best_clock));
872 
873 	/* based on hardware requirement, prefer smaller n for better precision */
874 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
875 		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
876 			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
877 			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
878 				clock.p = clock.p1 * clock.p2 * 5;
879 				/* based on hardware requirement, prefer bigger m1,m2 values */
880 				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
881 					unsigned int ppm;
882 
883 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
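					/* solve dot = refclk * m1 * m2 / (n * p) for m2 */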
884 								     refclk * clock.m1);
885 
886 					vlv_calc_dpll_params(refclk, &clock);
887 
888 					if (!intel_pll_is_valid(to_i915(dev),
889 								limit,
890 								&clock))
891 						continue;
892 
893 					if (!vlv_PLL_is_optimal(dev, target,
894 								&clock,
895 								best_clock,
896 								bestppm, &ppm))
897 						continue;
898 
899 					*best_clock = clock;
900 					bestppm = ppm;
901 					found = true;
902 				}
903 			}
904 		}
905 	}
906 
907 	return found;
908 }
909 
910 /*
911  * Returns a set of divisors for the desired target clock with the given
912  * refclk, or FALSE if no suitable divisors could be found.
913  */
914 static bool
915 chv_find_best_dpll(const struct intel_limit *limit,
916 		   struct intel_crtc_state *crtc_state,
917 		   int target, int refclk,
918 		   const struct dpll *match_clock,
919 		   struct dpll *best_clock)
920 {
921 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
922 	struct drm_device *dev = crtc->base.dev;
923 	unsigned int best_error_ppm;
924 	struct dpll clock;
925 	u64 m2;
926 	bool found = false;
927 
928 	memset(best_clock, 0, sizeof(*best_clock));
929 	best_error_ppm = 1000000;
930 
931 	/*
932 	 * Based on the hardware doc, n is always set to 1 and m1 is always
933 	 * set to 2. If we need to support a 200 MHz refclk, we will have to
934 	 * revisit this because n may no longer be 1.
935 	 */
936 	clock.n = 1;
937 	clock.m1 = 2;
938 
939 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
940 		for (clock.p2 = limit->p2.p2_fast;
941 				clock.p2 >= limit->p2.p2_slow;
942 				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
943 			unsigned int error_ppm;
944 
945 			clock.p = clock.p1 * clock.p2 * 5;
946 
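			/* solve dot = refclk * m1 * m2 / (n * p) for m2, with m2 in .22 fixed point */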
947 			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
948 						   refclk * clock.m1);
949 
950 			if (m2 > INT_MAX / clock.m1)
951 				continue;
952 
953 			clock.m2 = m2;
954 
955 			chv_calc_dpll_params(refclk, &clock);
956 
957 			if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
958 				continue;
959 
960 			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
961 						best_error_ppm, &error_ppm))
962 				continue;
963 
964 			*best_clock = clock;
965 			best_error_ppm = error_ppm;
966 			found = true;
967 		}
968 	}
969 
970 	return found;
971 }
972 
973 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
974 			struct dpll *best_clock)
975 {
976 	const struct intel_limit *limit = &intel_limits_bxt;
977 	int refclk = 100000;
978 
979 	return chv_find_best_dpll(limit, crtc_state,
980 				  crtc_state->port_clock, refclk,
981 				  NULL, best_clock);
982 }
983 
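/*
 * FP divisor register layout as written here: N starts at bit 16,
 * M1 at bit 8 and M2 at bit 0.
 */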
984 u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
985 {
986 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
987 }
988 
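/*
 * Pineview programs its N divider field as a one-hot value, hence the
 * (1 << n) here and the ffs() in the readout in i9xx_crtc_clock_get().
 */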
989 static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
990 {
991 	return (1 << dpll->n) << 16 | dpll->m2;
992 }
993 
994 static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state)
995 {
996 	return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
997 }
998 
999 static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
1000 		     const struct dpll *clock,
1001 		     const struct dpll *reduced_clock)
1002 {
1003 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1004 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1005 	u32 dpll;
1006 
1007 	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
1008 
1009 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1010 		dpll |= DPLLB_MODE_LVDS;
1011 	else
1012 		dpll |= DPLLB_MODE_DAC_SERIAL;
1013 
1014 	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
1015 	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
1016 		dpll |= (crtc_state->pixel_multiplier - 1)
1017 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
1018 	}
1019 
1020 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1021 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1022 		dpll |= DPLL_SDVO_HIGH_SPEED;
1023 
1024 	if (intel_crtc_has_dp_encoder(crtc_state))
1025 		dpll |= DPLL_SDVO_HIGH_SPEED;
1026 
1027 	/* compute bitmask from p1 value */
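	/* e.g. p1 == 3 sets bit 2 of the one-hot P1 field (illustrative) */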
1028 	if (IS_G4X(dev_priv)) {
1029 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1030 		dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1031 	} else if (IS_PINEVIEW(dev_priv)) {
1032 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
1033 		WARN_ON(reduced_clock->p1 != clock->p1);
1034 	} else {
1035 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1036 		WARN_ON(reduced_clock->p1 != clock->p1);
1037 	}
1038 
1039 	switch (clock->p2) {
1040 	case 5:
1041 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1042 		break;
1043 	case 7:
1044 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1045 		break;
1046 	case 10:
1047 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1048 		break;
1049 	case 14:
1050 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1051 		break;
1052 	}
1053 	WARN_ON(reduced_clock->p2 != clock->p2);
1054 
1055 	if (DISPLAY_VER(dev_priv) >= 4)
1056 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
1057 
1058 	if (crtc_state->sdvo_tv_clock)
1059 		dpll |= PLL_REF_INPUT_TVCLKINBC;
1060 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1061 		 intel_panel_use_ssc(dev_priv))
1062 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1063 	else
1064 		dpll |= PLL_REF_INPUT_DREFCLK;
1065 
1066 	return dpll;
1067 }
1068 
1069 static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
1070 			      const struct dpll *clock,
1071 			      const struct dpll *reduced_clock)
1072 {
1073 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1074 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1075 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1076 
1077 	if (IS_PINEVIEW(dev_priv)) {
1078 		hw_state->fp0 = pnv_dpll_compute_fp(clock);
1079 		hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
1080 	} else {
1081 		hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1082 		hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1083 	}
1084 
1085 	hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
1086 
1087 	if (DISPLAY_VER(dev_priv) >= 4)
1088 		hw_state->dpll_md = i965_dpll_md(crtc_state);
1089 }
1090 
1091 static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
1092 		     const struct dpll *clock,
1093 		     const struct dpll *reduced_clock)
1094 {
1095 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1096 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1097 	u32 dpll;
1098 
1099 	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
1100 
1101 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1102 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1103 	} else {
1104 		if (clock->p1 == 2)
1105 			dpll |= PLL_P1_DIVIDE_BY_TWO;
1106 		else
1107 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1108 		if (clock->p2 == 4)
1109 			dpll |= PLL_P2_DIVIDE_BY_4;
1110 	}
1111 	WARN_ON(reduced_clock->p1 != clock->p1);
1112 	WARN_ON(reduced_clock->p2 != clock->p2);
1113 
1114 	/*
1115 	 * Bspec:
1116 	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
1117 	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
1118 	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
1119 	 *  Enable) must be set to “1” in both the DPLL A Control Register
1120 	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
1121 	 *
1122 	 * For simplicity we keep both bits always enabled in both
1123 	 * DPLLs. The spec says we should disable the DVO 2X clock
1124 	 * when not needed, but this seems to work fine in practice.
1125 	 */
1126 	if (IS_I830(dev_priv) ||
1127 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
1128 		dpll |= DPLL_DVO_2X_MODE;
1129 
1130 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1131 	    intel_panel_use_ssc(dev_priv))
1132 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1133 	else
1134 		dpll |= PLL_REF_INPUT_DREFCLK;
1135 
1136 	return dpll;
1137 }
1138 
1139 static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
1140 			      const struct dpll *clock,
1141 			      const struct dpll *reduced_clock)
1142 {
1143 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1144 
1145 	hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1146 	hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1147 
1148 	hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock);
1149 }
1150 
1151 static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
1152 				  struct intel_crtc *crtc)
1153 {
1154 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1155 	struct intel_crtc_state *crtc_state =
1156 		intel_atomic_get_new_crtc_state(state, crtc);
1157 	struct intel_encoder *encoder =
1158 		intel_get_crtc_new_encoder(state, crtc_state);
1159 	int ret;
1160 
1161 	if (DISPLAY_VER(dev_priv) < 11 &&
1162 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1163 		return 0;
1164 
1165 	ret = intel_compute_shared_dplls(state, crtc, encoder);
1166 	if (ret)
1167 		return ret;
1168 
1169 	/* FIXME this is a mess */
1170 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1171 		return 0;
1172 
1173 	/* CRT dotclock is determined via other means */
1174 	if (!crtc_state->has_pch_encoder)
1175 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1176 
1177 	return 0;
1178 }
1179 
1180 static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
1181 				    struct intel_crtc *crtc)
1182 {
1183 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1184 	struct intel_crtc_state *crtc_state =
1185 		intel_atomic_get_new_crtc_state(state, crtc);
1186 	struct intel_encoder *encoder =
1187 		intel_get_crtc_new_encoder(state, crtc_state);
1188 
1189 	if (DISPLAY_VER(dev_priv) < 11 &&
1190 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1191 		return 0;
1192 
1193 	return intel_reserve_shared_dplls(state, crtc, encoder);
1194 }
1195 
1196 static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
1197 				  struct intel_crtc *crtc)
1198 {
1199 	struct intel_crtc_state *crtc_state =
1200 		intel_atomic_get_new_crtc_state(state, crtc);
1201 	struct intel_encoder *encoder =
1202 		intel_get_crtc_new_encoder(state, crtc_state);
1203 	int ret;
1204 
1205 	ret = intel_mpllb_calc_state(crtc_state, encoder);
1206 	if (ret)
1207 		return ret;
1208 
1209 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1210 
1211 	return 0;
1212 }
1213 
1214 static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
1215 				  struct intel_crtc *crtc)
1216 {
1217 	struct intel_crtc_state *crtc_state =
1218 		intel_atomic_get_new_crtc_state(state, crtc);
1219 	struct intel_encoder *encoder =
1220 		intel_get_crtc_new_encoder(state, crtc_state);
1221 	int ret;
1222 
1223 	ret = intel_cx0pll_calc_state(crtc_state, encoder);
1224 	if (ret)
1225 		return ret;
1226 
1227 	/* TODO: Do the readback via intel_compute_shared_dplls() */
1228 	crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
1229 
1230 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1231 
1232 	return 0;
1233 }
1234 
1235 static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
1236 {
1237 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1238 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1239 
1240 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1241 	    ((intel_panel_use_ssc(i915) && i915->display.vbt.lvds_ssc_freq == 100000) ||
1242 	     (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915))))
1243 		return 25;
1244 
1245 	if (crtc_state->sdvo_tv_clock)
1246 		return 20;
1247 
1248 	return 21;
1249 }
1250 
1251 static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
1252 {
1253 	return dpll->m < factor * dpll->n;
1254 }
1255 
1256 static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor)
1257 {
1258 	u32 fp;
1259 
1260 	fp = i9xx_dpll_compute_fp(clock);
1261 	if (ilk_needs_fb_cb_tune(clock, factor))
1262 		fp |= FP_CB_TUNE;
1263 
1264 	return fp;
1265 }
1266 
1267 static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
1268 		    const struct dpll *clock,
1269 		    const struct dpll *reduced_clock)
1270 {
1271 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1272 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1273 	u32 dpll;
1274 
1275 	dpll = DPLL_VCO_ENABLE;
1276 
1277 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1278 		dpll |= DPLLB_MODE_LVDS;
1279 	else
1280 		dpll |= DPLLB_MODE_DAC_SERIAL;
1281 
1282 	dpll |= (crtc_state->pixel_multiplier - 1)
1283 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1284 
1285 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1286 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1287 		dpll |= DPLL_SDVO_HIGH_SPEED;
1288 
1289 	if (intel_crtc_has_dp_encoder(crtc_state))
1290 		dpll |= DPLL_SDVO_HIGH_SPEED;
1291 
1292 	/*
1293 	 * The high speed IO clock is only really required for
1294 	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
1295 	 * possible to share the DPLL between CRT and HDMI. Enabling
1296 	 * the clock needlessly does no real harm, except potentially
1297 	 * using up a bit of extra power.
1298 	 *
1299 	 * We'll limit this to IVB with 3 pipes, since it has only two
1300 	 * DPLLs and so DPLL sharing is the only way to get three pipes
1301 	 * driving PCH ports at the same time. On SNB we could do this,
1302 	 * and potentially avoid enabling the second DPLL, but it's not
1303 	 * clear if it's a win or loss power-wise. No point in doing
1304 	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
1305 	 */
1306 	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
1307 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1308 		dpll |= DPLL_SDVO_HIGH_SPEED;
1309 
1310 	/* compute bitmask from p1 value */
1311 	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1312 	/* also FPA1 */
1313 	dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1314 
1315 	switch (clock->p2) {
1316 	case 5:
1317 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1318 		break;
1319 	case 7:
1320 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1321 		break;
1322 	case 10:
1323 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1324 		break;
1325 	case 14:
1326 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1327 		break;
1328 	}
1329 	WARN_ON(reduced_clock->p2 != clock->p2);
1330 
1331 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1332 	    intel_panel_use_ssc(dev_priv))
1333 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1334 	else
1335 		dpll |= PLL_REF_INPUT_DREFCLK;
1336 
1337 	return dpll;
1338 }
1339 
1340 static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
1341 			     const struct dpll *clock,
1342 			     const struct dpll *reduced_clock)
1343 {
1344 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1345 	int factor = ilk_fb_cb_factor(crtc_state);
1346 
1347 	hw_state->fp0 = ilk_dpll_compute_fp(clock, factor);
1348 	hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor);
1349 
1350 	hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock);
1351 }
1352 
1353 static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
1354 				  struct intel_crtc *crtc)
1355 {
1356 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1357 	struct intel_crtc_state *crtc_state =
1358 		intel_atomic_get_new_crtc_state(state, crtc);
1359 	const struct intel_limit *limit;
1360 	int refclk = 120000;
1361 	int ret;
1362 
1363 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1364 	if (!crtc_state->has_pch_encoder)
1365 		return 0;
1366 
1367 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1368 		if (intel_panel_use_ssc(dev_priv)) {
1369 			drm_dbg_kms(&dev_priv->drm,
1370 				    "using SSC reference clock of %d kHz\n",
1371 				    dev_priv->display.vbt.lvds_ssc_freq);
1372 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1373 		}
1374 
1375 		if (intel_is_dual_link_lvds(dev_priv)) {
1376 			if (refclk == 100000)
1377 				limit = &ilk_limits_dual_lvds_100m;
1378 			else
1379 				limit = &ilk_limits_dual_lvds;
1380 		} else {
1381 			if (refclk == 100000)
1382 				limit = &ilk_limits_single_lvds_100m;
1383 			else
1384 				limit = &ilk_limits_single_lvds;
1385 		}
1386 	} else {
1387 		limit = &ilk_limits_dac;
1388 	}
1389 
1390 	if (!crtc_state->clock_set &&
1391 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1392 				refclk, NULL, &crtc_state->dpll))
1393 		return -EINVAL;
1394 
1395 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1396 
1397 	ilk_compute_dpll(crtc_state, &crtc_state->dpll,
1398 			 &crtc_state->dpll);
1399 
1400 	ret = intel_compute_shared_dplls(state, crtc, NULL);
1401 	if (ret)
1402 		return ret;
1403 
1404 	crtc_state->port_clock = crtc_state->dpll.dot;
1405 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1406 
1407 	return ret;
1408 }
1409 
1410 static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
1411 				    struct intel_crtc *crtc)
1412 {
1413 	struct intel_crtc_state *crtc_state =
1414 		intel_atomic_get_new_crtc_state(state, crtc);
1415 
1416 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1417 	if (!crtc_state->has_pch_encoder)
1418 		return 0;
1419 
1420 	return intel_reserve_shared_dplls(state, crtc, NULL);
1421 }
1422 
1423 static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
1424 {
1425 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1426 	u32 dpll;
1427 
1428 	dpll = DPLL_INTEGRATED_REF_CLK_VLV |
1429 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1430 
1431 	if (crtc->pipe != PIPE_A)
1432 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1433 
1434 	/* DPLL not used with DSI, but still need the rest set up */
1435 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1436 		dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
1437 
1438 	return dpll;
1439 }
1440 
1441 void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
1442 {
1443 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1444 
1445 	hw_state->dpll = vlv_dpll(crtc_state);
1446 	hw_state->dpll_md = i965_dpll_md(crtc_state);
1447 }
1448 
1449 static u32 chv_dpll(const struct intel_crtc_state *crtc_state)
1450 {
1451 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1452 	u32 dpll;
1453 
1454 	dpll = DPLL_SSC_REF_CLK_CHV |
1455 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1456 
1457 	if (crtc->pipe != PIPE_A)
1458 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1459 
1460 	/* DPLL not used with DSI, but still need the rest set up */
1461 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1462 		dpll |= DPLL_VCO_ENABLE;
1463 
1464 	return dpll;
1465 }
1466 
1467 void chv_compute_dpll(struct intel_crtc_state *crtc_state)
1468 {
1469 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1470 
1471 	hw_state->dpll = chv_dpll(crtc_state);
1472 	hw_state->dpll_md = i965_dpll_md(crtc_state);
1473 }
1474 
1475 static int chv_crtc_compute_clock(struct intel_atomic_state *state,
1476 				  struct intel_crtc *crtc)
1477 {
1478 	struct intel_crtc_state *crtc_state =
1479 		intel_atomic_get_new_crtc_state(state, crtc);
1480 	const struct intel_limit *limit = &intel_limits_chv;
1481 	int refclk = 100000;
1482 
1483 	if (!crtc_state->clock_set &&
1484 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1485 				refclk, NULL, &crtc_state->dpll))
1486 		return -EINVAL;
1487 
1488 	chv_calc_dpll_params(refclk, &crtc_state->dpll);
1489 
1490 	chv_compute_dpll(crtc_state);
1491 
1492 	/* FIXME this is a mess */
1493 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1494 		return 0;
1495 
1496 	crtc_state->port_clock = crtc_state->dpll.dot;
1497 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1498 
1499 	return 0;
1500 }
1501 
1502 static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
1503 				  struct intel_crtc *crtc)
1504 {
1505 	struct intel_crtc_state *crtc_state =
1506 		intel_atomic_get_new_crtc_state(state, crtc);
1507 	const struct intel_limit *limit = &intel_limits_vlv;
1508 	int refclk = 100000;
1509 
1510 	if (!crtc_state->clock_set &&
1511 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1512 				refclk, NULL, &crtc_state->dpll))
1513 		return -EINVAL;
1514 
1515 	vlv_calc_dpll_params(refclk, &crtc_state->dpll);
1516 
1517 	vlv_compute_dpll(crtc_state);
1518 
1519 	/* FIXME this is a mess */
1520 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1521 		return 0;
1522 
1523 	crtc_state->port_clock = crtc_state->dpll.dot;
1524 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1525 
1526 	return 0;
1527 }
1528 
1529 static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
1530 				  struct intel_crtc *crtc)
1531 {
1532 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1533 	struct intel_crtc_state *crtc_state =
1534 		intel_atomic_get_new_crtc_state(state, crtc);
1535 	const struct intel_limit *limit;
1536 	int refclk = 96000;
1537 
1538 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1539 		if (intel_panel_use_ssc(dev_priv)) {
1540 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1541 			drm_dbg_kms(&dev_priv->drm,
1542 				    "using SSC reference clock of %d kHz\n",
1543 				    refclk);
1544 		}
1545 
1546 		if (intel_is_dual_link_lvds(dev_priv))
1547 			limit = &intel_limits_g4x_dual_channel_lvds;
1548 		else
1549 			limit = &intel_limits_g4x_single_channel_lvds;
1550 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
1551 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
1552 		limit = &intel_limits_g4x_hdmi;
1553 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
1554 		limit = &intel_limits_g4x_sdvo;
1555 	} else {
1556 		/* Fall back to the i9xx SDVO limits for any other output type */
1557 		limit = &intel_limits_i9xx_sdvo;
1558 	}
1559 
1560 	if (!crtc_state->clock_set &&
1561 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1562 				refclk, NULL, &crtc_state->dpll))
1563 		return -EINVAL;
1564 
1565 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1566 
1567 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1568 			  &crtc_state->dpll);
1569 
1570 	crtc_state->port_clock = crtc_state->dpll.dot;
1571 	/* FIXME this is a mess */
1572 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1573 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1574 
1575 	return 0;
1576 }
1577 
1578 static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
1579 				  struct intel_crtc *crtc)
1580 {
1581 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1582 	struct intel_crtc_state *crtc_state =
1583 		intel_atomic_get_new_crtc_state(state, crtc);
1584 	const struct intel_limit *limit;
1585 	int refclk = 96000;
1586 
1587 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1588 		if (intel_panel_use_ssc(dev_priv)) {
1589 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1590 			drm_dbg_kms(&dev_priv->drm,
1591 				    "using SSC reference clock of %d kHz\n",
1592 				    refclk);
1593 		}
1594 
1595 		limit = &pnv_limits_lvds;
1596 	} else {
1597 		limit = &pnv_limits_sdvo;
1598 	}
1599 
1600 	if (!crtc_state->clock_set &&
1601 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1602 				refclk, NULL, &crtc_state->dpll))
1603 		return -EINVAL;
1604 
1605 	pnv_calc_dpll_params(refclk, &crtc_state->dpll);
1606 
1607 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1608 			  &crtc_state->dpll);
1609 
1610 	crtc_state->port_clock = crtc_state->dpll.dot;
1611 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1612 
1613 	return 0;
1614 }
1615 
1616 static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
1617 				   struct intel_crtc *crtc)
1618 {
1619 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1620 	struct intel_crtc_state *crtc_state =
1621 		intel_atomic_get_new_crtc_state(state, crtc);
1622 	const struct intel_limit *limit;
1623 	int refclk = 96000;
1624 
1625 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1626 		if (intel_panel_use_ssc(dev_priv)) {
1627 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1628 			drm_dbg_kms(&dev_priv->drm,
1629 				    "using SSC reference clock of %d kHz\n",
1630 				    refclk);
1631 		}
1632 
1633 		limit = &intel_limits_i9xx_lvds;
1634 	} else {
1635 		limit = &intel_limits_i9xx_sdvo;
1636 	}
1637 
1638 	if (!crtc_state->clock_set &&
1639 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1640 				 refclk, NULL, &crtc_state->dpll))
1641 		return -EINVAL;
1642 
1643 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1644 
1645 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1646 			  &crtc_state->dpll);
1647 
1648 	crtc_state->port_clock = crtc_state->dpll.dot;
1649 	/* FIXME this is a mess */
1650 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1651 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1652 
1653 	return 0;
1654 }
1655 
1656 static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
1657 				   struct intel_crtc *crtc)
1658 {
1659 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1660 	struct intel_crtc_state *crtc_state =
1661 		intel_atomic_get_new_crtc_state(state, crtc);
1662 	const struct intel_limit *limit;
1663 	int refclk = 48000;
1664 
1665 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1666 		if (intel_panel_use_ssc(dev_priv)) {
1667 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1668 			drm_dbg_kms(&dev_priv->drm,
1669 				    "using SSC reference clock of %d kHz\n",
1670 				    refclk);
1671 		}
1672 
1673 		limit = &intel_limits_i8xx_lvds;
1674 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
1675 		limit = &intel_limits_i8xx_dvo;
1676 	} else {
1677 		limit = &intel_limits_i8xx_dac;
1678 	}
1679 
1680 	if (!crtc_state->clock_set &&
1681 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1682 				 refclk, NULL, &crtc_state->dpll))
1683 		return -EINVAL;
1684 
1685 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1686 
1687 	i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
1688 			  &crtc_state->dpll);
1689 
1690 	crtc_state->port_clock = crtc_state->dpll.dot;
1691 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1692 
1693 	return 0;
1694 }
1695 
1696 static const struct intel_dpll_funcs mtl_dpll_funcs = {
1697 	.crtc_compute_clock = mtl_crtc_compute_clock,
1698 };
1699 
1700 static const struct intel_dpll_funcs dg2_dpll_funcs = {
1701 	.crtc_compute_clock = dg2_crtc_compute_clock,
1702 };
1703 
1704 static const struct intel_dpll_funcs hsw_dpll_funcs = {
1705 	.crtc_compute_clock = hsw_crtc_compute_clock,
1706 	.crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
1707 };
1708 
1709 static const struct intel_dpll_funcs ilk_dpll_funcs = {
1710 	.crtc_compute_clock = ilk_crtc_compute_clock,
1711 	.crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
1712 };
1713 
1714 static const struct intel_dpll_funcs chv_dpll_funcs = {
1715 	.crtc_compute_clock = chv_crtc_compute_clock,
1716 };
1717 
1718 static const struct intel_dpll_funcs vlv_dpll_funcs = {
1719 	.crtc_compute_clock = vlv_crtc_compute_clock,
1720 };
1721 
1722 static const struct intel_dpll_funcs g4x_dpll_funcs = {
1723 	.crtc_compute_clock = g4x_crtc_compute_clock,
1724 };
1725 
1726 static const struct intel_dpll_funcs pnv_dpll_funcs = {
1727 	.crtc_compute_clock = pnv_crtc_compute_clock,
1728 };
1729 
1730 static const struct intel_dpll_funcs i9xx_dpll_funcs = {
1731 	.crtc_compute_clock = i9xx_crtc_compute_clock,
1732 };
1733 
1734 static const struct intel_dpll_funcs i8xx_dpll_funcs = {
1735 	.crtc_compute_clock = i8xx_crtc_compute_clock,
1736 };
1737 
1738 int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
1739 				  struct intel_crtc *crtc)
1740 {
1741 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1742 	struct intel_crtc_state *crtc_state =
1743 		intel_atomic_get_new_crtc_state(state, crtc);
1744 	int ret;
1745 
1746 	drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
1747 
1748 	memset(&crtc_state->dpll_hw_state, 0,
1749 	       sizeof(crtc_state->dpll_hw_state));
1750 
1751 	if (!crtc_state->hw.enable)
1752 		return 0;
1753 
1754 	ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
1755 	if (ret) {
1756 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
1757 			    crtc->base.base.id, crtc->base.name);
1758 		return ret;
1759 	}
1760 
1761 	return 0;
1762 }
1763 
1764 int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
1765 				    struct intel_crtc *crtc)
1766 {
1767 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1768 	struct intel_crtc_state *crtc_state =
1769 		intel_atomic_get_new_crtc_state(state, crtc);
1770 	int ret;
1771 
1772 	drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
1773 	drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
1774 
1775 	if (!crtc_state->hw.enable || crtc_state->shared_dpll)
1776 		return 0;
1777 
1778 	if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
1779 		return 0;
1780 
1781 	ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
1782 	if (ret) {
1783 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
1784 			    crtc->base.base.id, crtc->base.name);
1785 		return ret;
1786 	}
1787 
1788 	return 0;
1789 }
1790 
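/*
 * The platform checks below run roughly from newest to oldest, and the more
 * specific ones must come before the generic ones: DG2, for instance, also
 * satisfies the DISPLAY_VER() >= 9 / HAS_DDI() condition, so it has to be
 * matched before the hsw_dpll_funcs fallback.
 */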
1791 void
1792 intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
1793 {
1794 	if (DISPLAY_VER(dev_priv) >= 14)
1795 		dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
1796 	else if (IS_DG2(dev_priv))
1797 		dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
1798 	else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
1799 		dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
1800 	else if (HAS_PCH_SPLIT(dev_priv))
1801 		dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
1802 	else if (IS_CHERRYVIEW(dev_priv))
1803 		dev_priv->display.funcs.dpll = &chv_dpll_funcs;
1804 	else if (IS_VALLEYVIEW(dev_priv))
1805 		dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
1806 	else if (IS_G4X(dev_priv))
1807 		dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
1808 	else if (IS_PINEVIEW(dev_priv))
1809 		dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
1810 	else if (DISPLAY_VER(dev_priv) != 2)
1811 		dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
1812 	else
1813 		dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
1814 }
1815 
1816 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1817 {
1818 	if (IS_I830(dev_priv))
1819 		return false;
1820 
1821 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1822 }
1823 
1824 void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
1825 {
1826 	struct intel_display *display = to_intel_display(crtc_state);
1827 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1828 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1829 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1830 	enum pipe pipe = crtc->pipe;
1831 	int i;
1832 
1833 	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
1834 
1835 	/* PLL is protected by panel, make sure we can write it */
1836 	if (i9xx_has_pps(dev_priv))
1837 		assert_pps_unlocked(display, pipe);
1838 
1839 	intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
1840 	intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);
1841 
1842 	/*
1843 	 * Apparently we need to have VGA mode enabled prior to changing
1844 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1845 	 * dividers, even though the register value does change.
1846 	 */
1847 	intel_de_write(dev_priv, DPLL(dev_priv, pipe),
1848 		       hw_state->dpll & ~DPLL_VGA_MODE_DIS);
1849 	intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
1850 
1851 	/* Wait for the clocks to stabilize. */
1852 	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
1853 	udelay(150);
1854 
1855 	if (DISPLAY_VER(dev_priv) >= 4) {
1856 		intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
1857 			       hw_state->dpll_md);
1858 	} else {
1859 		/* The pixel multiplier can only be updated once the
1860 		 * DPLL is enabled and the clocks are stable.
1861 		 *
1862 		 * So write it again.
1863 		 */
1864 		intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
1865 	}
1866 
1867 	/* We do this three times for luck */
1868 	for (i = 0; i < 3; i++) {
1869 		intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
1870 		intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
1871 		udelay(150); /* wait for warmup */
1872 	}
1873 }
1874 
1875 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
1876 				 enum dpio_phy phy, enum dpio_channel ch)
1877 {
1878 	u32 tmp;
1879 
1880 	/*
1881 	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
1882 	 * and set it to a reasonable value instead.
1883 	 */
1884 	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
1885 	tmp &= 0xffffff00;
1886 	tmp |= 0x00000030;
1887 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
1888 
1889 	tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
1890 	tmp &= 0x00ffffff;
1891 	tmp |= 0x8c000000;
1892 	vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
1893 
1894 	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
1895 	tmp &= 0xffffff00;
1896 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
1897 
1898 	tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
1899 	tmp &= 0x00ffffff;
1900 	tmp |= 0xb0000000;
1901 	vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
1902 }
1903 
1904 static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
1905 {
1906 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1907 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1908 	const struct dpll *clock = &crtc_state->dpll;
1909 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
1910 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
1911 	enum pipe pipe = crtc->pipe;
1912 	u32 tmp, coreclk;
1913 
1914 	vlv_dpio_get(dev_priv);
1915 
1916 	/* See eDP HDMI DPIO driver vbios notes doc */
1917 
1918 	/* PLL B needs special handling */
1919 	if (pipe == PIPE_B)
1920 		vlv_pllb_recal_opamp(dev_priv, phy, ch);
1921 
1922 	/* Set up Tx target for periodic Rcomp update */
1923 	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
1924 
1925 	/* Disable target IRef on PLL */
1926 	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW16(ch));
1927 	tmp &= 0x00ffffff;
1928 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW16(ch), tmp);
1929 
1930 	/* Disable fast lock */
1931 	vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
1932 
1933 	/* Set idtafcrecal before PLL is enabled */
1934 	tmp = DPIO_M1_DIV(clock->m1) |
1935 		DPIO_M2_DIV(clock->m2) |
1936 		DPIO_P1_DIV(clock->p1) |
1937 		DPIO_P2_DIV(clock->p2) |
1938 		DPIO_N_DIV(clock->n) |
1939 		DPIO_K_DIV(1);
1940 
1941 	/*
1942 	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
1943 	 * but we don't support that).
1944 	 * Note: don't use the DAC post divider as it seems unstable.
1945 	 */
1946 	tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
1947 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
1948 
1949 	tmp |= DPIO_ENABLE_CALIBRATION;
1950 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
1951 
1952 	/* Set HBR and RBR LPF coefficients */
1953 	if (crtc_state->port_clock == 162000 ||
1954 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
1955 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1956 		vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
1957 				 0x009f0003);
1958 	else
1959 		vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
1960 				 0x00d0000f);
1961 
1962 	if (intel_crtc_has_dp_encoder(crtc_state)) {
1963 		/* Use SSC source */
1964 		if (pipe == PIPE_A)
1965 			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1966 					 0x0df40000);
1967 		else
1968 			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1969 					 0x0df70000);
1970 	} else { /* HDMI or VGA */
1971 		/* Use bend source */
1972 		if (pipe == PIPE_A)
1973 			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1974 					 0x0df70000);
1975 		else
1976 			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1977 					 0x0df40000);
1978 	}
1979 
1980 	coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(ch));
1981 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
1982 	if (intel_crtc_has_dp_encoder(crtc_state))
1983 		coreclk |= 0x01000000;
1984 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(ch), coreclk);
1985 
1986 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW19(ch), 0x87871000);
1987 
1988 	vlv_dpio_put(dev_priv);
1989 }
1990 
1991 static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
1992 {
1993 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1994 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1995 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1996 	enum pipe pipe = crtc->pipe;
1997 
1998 	intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
1999 	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
2000 	udelay(150);
2001 
2002 	if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
2003 		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
2004 }
2005 
2006 void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
2007 {
2008 	struct intel_display *display = to_intel_display(crtc_state);
2009 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2010 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2011 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2012 	enum pipe pipe = crtc->pipe;
2013 
2014 	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2015 
2016 	/* PLL is protected by panel, make sure we can write it */
2017 	assert_pps_unlocked(display, pipe);
2018 
2019 	/* Enable Refclk */
2020 	intel_de_write(dev_priv, DPLL(dev_priv, pipe),
2021 		       hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
2022 
2023 	if (hw_state->dpll & DPLL_VCO_ENABLE) {
2024 		vlv_prepare_pll(crtc_state);
2025 		_vlv_enable_pll(crtc_state);
2026 	}
2027 
2028 	intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe), hw_state->dpll_md);
2029 	intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
2030 }
2031 
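/*
 * Note: on CHV the m2 divider in struct dpll carries a fixed point value
 * with 22 fractional bits. chv_prepare_pll() below writes the integer part
 * (m2 >> 22) through CHV_PLL_DW0 and the low 22 fractional bits through
 * CHV_PLL_DW2, and only enables fractional division when that fraction is
 * non-zero.
 */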
2032 static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
2033 {
2034 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2035 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2036 	const struct dpll *clock = &crtc_state->dpll;
2037 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
2038 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
2039 	u32 tmp, loopfilter, tribuf_calcntr;
2040 	u32 m2_frac;
2041 
2042 	m2_frac = clock->m2 & 0x3fffff;
2043 
2044 	vlv_dpio_get(dev_priv);
2045 
2046 	/* p1 and p2 divider */
2047 	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(ch),
2048 		       DPIO_CHV_S1_DIV(5) |
2049 		       DPIO_CHV_P1_DIV(clock->p1) |
2050 		       DPIO_CHV_P2_DIV(clock->p2) |
2051 		       DPIO_CHV_K_DIV(1));
2052 
2053 	/* Feedback post-divider - m2 */
2054 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(ch),
2055 		       DPIO_CHV_M2_DIV(clock->m2 >> 22));
2056 
2057 	/* Feedback refclk divider - n and m1 */
2058 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(ch),
2059 		       DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
2060 		       DPIO_CHV_N_DIV(1));
2061 
2062 	/* M2 fraction division */
2063 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(ch),
2064 		       DPIO_CHV_M2_FRAC_DIV(m2_frac));
2065 
2066 	/* M2 fraction division enable */
2067 	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
2068 	tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
2069 	tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
2070 	if (m2_frac)
2071 		tmp |= DPIO_CHV_FRAC_DIV_EN;
2072 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(ch), tmp);
2073 
2074 	/* Program digital lock detect threshold */
2075 	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(ch));
2076 	tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
2077 		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
2078 	tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
2079 	if (!m2_frac)
2080 		tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
2081 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(ch), tmp);
2082 
2083 	/* Loop filter */
2084 	if (clock->vco == 5400000) {
2085 		loopfilter = DPIO_CHV_PROP_COEFF(0x3) |
2086 			DPIO_CHV_INT_COEFF(0x8) |
2087 			DPIO_CHV_GAIN_CTRL(0x1);
2088 		tribuf_calcntr = 0x9;
2089 	} else if (clock->vco <= 6200000) {
2090 		loopfilter = DPIO_CHV_PROP_COEFF(0x5) |
2091 			DPIO_CHV_INT_COEFF(0xB) |
2092 			DPIO_CHV_GAIN_CTRL(0x3);
2093 		tribuf_calcntr = 0x9;
2094 	} else if (clock->vco <= 6480000) {
2095 		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
2096 			DPIO_CHV_INT_COEFF(0x9) |
2097 			DPIO_CHV_GAIN_CTRL(0x3);
2098 		tribuf_calcntr = 0x8;
2099 	} else {
2100 		/* Not supported. Apply the same limits as in the max case */
2101 		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
2102 			DPIO_CHV_INT_COEFF(0x9) |
2103 			DPIO_CHV_GAIN_CTRL(0x3);
2104 		tribuf_calcntr = 0;
2105 	}
2106 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(ch), loopfilter);
2107 
2108 	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(ch));
2109 	tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
2110 	tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
2111 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(ch), tmp);
2112 
2113 	/* AFC Recal */
2114 	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch),
2115 		       vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)) |
2116 		       DPIO_AFC_RECAL);
2117 
2118 	vlv_dpio_put(dev_priv);
2119 }
2120 
2121 static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
2122 {
2123 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2124 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2125 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2126 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
2127 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
2128 	enum pipe pipe = crtc->pipe;
2129 	u32 tmp;
2130 
2131 	vlv_dpio_get(dev_priv);
2132 
2133 	/* Re-enable the 10bit clock to the display controller */
2134 	tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
2135 	tmp |= DPIO_DCLKP_EN;
2136 	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), tmp);
2137 
2138 	vlv_dpio_put(dev_priv);
2139 
2140 	/*
2141 	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
2142 	 */
2143 	udelay(1);
2144 
2145 	/* Enable PLL */
2146 	intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
2147 
2148 	/* Check PLL is locked */
2149 	if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
2150 		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
2151 }
2152 
2153 void chv_enable_pll(const struct intel_crtc_state *crtc_state)
2154 {
2155 	struct intel_display *display = to_intel_display(crtc_state);
2156 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2157 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2158 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2159 	enum pipe pipe = crtc->pipe;
2160 
2161 	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2162 
2163 	/* PLL is protected by panel, make sure we can write it */
2164 	assert_pps_unlocked(display, pipe);
2165 
2166 	/* Enable Refclk and SSC */
2167 	intel_de_write(dev_priv, DPLL(dev_priv, pipe),
2168 		       hw_state->dpll & ~DPLL_VCO_ENABLE);
2169 
2170 	if (hw_state->dpll & DPLL_VCO_ENABLE) {
2171 		chv_prepare_pll(crtc_state);
2172 		_chv_enable_pll(crtc_state);
2173 	}
2174 
2175 	if (pipe != PIPE_A) {
2176 		/*
2177 		 * WaPixelRepeatModeFixForC0:chv
2178 		 *
2179 		 * DPLLCMD is AWOL. Use chicken bits to propagate
2180 		 * the value from DPLLBMD to either pipe B or C.
2181 		 */
2182 		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
2183 		intel_de_write(dev_priv, DPLL_MD(dev_priv, PIPE_B),
2184 			       hw_state->dpll_md);
2185 		intel_de_write(dev_priv, CBR4_VLV, 0);
2186 		dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md;
2187 
2188 		/*
2189 		 * DPLLB VGA mode also seems to cause problems.
2190 		 * We should always have it disabled.
2191 		 */
2192 		drm_WARN_ON(&dev_priv->drm,
2193 			    (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) &
2194 			     DPLL_VGA_MODE_DIS) == 0);
2195 	} else {
2196 		intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
2197 			       hw_state->dpll_md);
2198 		intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
2199 	}
2200 }
2201 
2202 /**
2203  * vlv_force_pll_on - forcibly enable just the PLL
2204  * @dev_priv: i915 private structure
2205  * @pipe: pipe PLL to enable
2206  * @dpll: PLL configuration
2207  *
2208  * Enable the PLL for @pipe using the supplied @dpll config. To be used
2209  * in cases where we need the PLL enabled even when @pipe is not going to
2210  * be enabled.
2211  */
2212 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
2213 		     const struct dpll *dpll)
2214 {
2215 	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
2216 	struct intel_crtc_state *crtc_state;
2217 
2218 	crtc_state = intel_crtc_state_alloc(crtc);
2219 	if (!crtc_state)
2220 		return -ENOMEM;
2221 
2222 	crtc_state->cpu_transcoder = (enum transcoder)pipe;
2223 	crtc_state->pixel_multiplier = 1;
2224 	crtc_state->dpll = *dpll;
2225 	crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
2226 
2227 	if (IS_CHERRYVIEW(dev_priv)) {
2228 		chv_compute_dpll(crtc_state);
2229 		chv_enable_pll(crtc_state);
2230 	} else {
2231 		vlv_compute_dpll(crtc_state);
2232 		vlv_enable_pll(crtc_state);
2233 	}
2234 
2235 	intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
2236 
2237 	return 0;
2238 }
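
/*
 * Rough usage sketch for the force on/off pair (illustrative only, not a
 * real call site; the divider values are placeholders and a real caller
 * passes a configuration already validated against the VLV/CHV limits):
 *
 *	const struct dpll pll = {
 *		.m1 = ..., .m2 = ..., .n = ..., .p1 = ..., .p2 = ...,
 *	};
 *
 *	vlv_force_pll_on(dev_priv, PIPE_A, &pll);
 *	... poke whatever needed the pipe A clock running ...
 *	vlv_force_pll_off(dev_priv, PIPE_A);
 */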
2239 
2240 void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
2241 {
2242 	u32 val;
2243 
2244 	/* Make sure the pipe isn't still relying on us */
2245 	assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
2246 
2247 	val = DPLL_INTEGRATED_REF_CLK_VLV |
2248 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2249 	if (pipe != PIPE_A)
2250 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2251 
2252 	intel_de_write(dev_priv, DPLL(dev_priv, pipe), val);
2253 	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
2254 }
2255 
2256 void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
2257 {
2258 	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
2259 	enum dpio_phy phy = vlv_pipe_to_phy(pipe);
2260 	u32 val;
2261 
2262 	/* Make sure the pipe isn't still relying on us */
2263 	assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
2264 
2265 	val = DPLL_SSC_REF_CLK_CHV |
2266 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2267 	if (pipe != PIPE_A)
2268 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2269 
2270 	intel_de_write(dev_priv, DPLL(dev_priv, pipe), val);
2271 	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
2272 
2273 	vlv_dpio_get(dev_priv);
2274 
2275 	/* Disable 10bit clock to display controller */
2276 	val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
2277 	val &= ~DPIO_DCLKP_EN;
2278 	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), val);
2279 
2280 	vlv_dpio_put(dev_priv);
2281 }
2282 
2283 void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
2284 {
2285 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2286 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2287 	enum pipe pipe = crtc->pipe;
2288 
2289 	/* i830 needs its pipe PLLs always enabled, so don't disable them */
2290 	if (IS_I830(dev_priv))
2291 		return;
2292 
2293 	/* Make sure the pipe isn't still relying on us */
2294 	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2295 
2296 	intel_de_write(dev_priv, DPLL(dev_priv, pipe), DPLL_VGA_MODE_DIS);
2297 	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
2298 }
2299 
2300 
2301 /**
2302  * vlv_force_pll_off - forcibly disable just the PLL
2303  * @dev_priv: i915 private structure
2304  * @pipe: pipe PLL to disable
2305  *
2306  * Disable the PLL for @pipe. To be used in cases where the PLL was
2307  * force enabled via vlv_force_pll_on() even though @pipe itself is off.
2308  */
2309 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
2310 {
2311 	if (IS_CHERRYVIEW(dev_priv))
2312 		chv_disable_pll(dev_priv, pipe);
2313 	else
2314 		vlv_disable_pll(dev_priv, pipe);
2315 }
2316 
2317 /* Only for pre-ILK configs */
2318 static void assert_pll(struct drm_i915_private *dev_priv,
2319 		       enum pipe pipe, bool state)
2320 {
2321 	bool cur_state;
2322 
2323 	cur_state = intel_de_read(dev_priv, DPLL(dev_priv, pipe)) & DPLL_VCO_ENABLE;
2324 	I915_STATE_WARN(dev_priv, cur_state != state,
2325 			"PLL state assertion failure (expected %s, current %s)\n",
2326 			str_on_off(state), str_on_off(cur_state));
2327 }
2328 
2329 void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
2330 {
2331 	assert_pll(i915, pipe, true);
2332 }
2333 
2334 void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
2335 {
2336 	assert_pll(i915, pipe, false);
2337 }
2338