xref: /linux/drivers/gpu/drm/i915/display/intel_dpll.c (revision fcab107abe1ab5be9dbe874baa722372da8f4f73)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/string_helpers.h>
8 
9 #include "i915_drv.h"
10 #include "i915_reg.h"
11 #include "intel_atomic.h"
12 #include "intel_crtc.h"
13 #include "intel_cx0_phy.h"
14 #include "intel_de.h"
15 #include "intel_display.h"
16 #include "intel_display_types.h"
17 #include "intel_dpio_phy.h"
18 #include "intel_dpll.h"
19 #include "intel_lvds.h"
20 #include "intel_lvds_regs.h"
21 #include "intel_panel.h"
22 #include "intel_pps.h"
23 #include "intel_snps_phy.h"
24 #include "vlv_dpio_phy_regs.h"
25 #include "vlv_sideband.h"
26 
27 struct intel_dpll_funcs {
28 	int (*crtc_compute_clock)(struct intel_atomic_state *state,
29 				  struct intel_crtc *crtc);
30 	int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
31 				    struct intel_crtc *crtc);
32 };
33 
34 struct intel_limit {
35 	struct {
36 		int min, max;
37 	} dot, vco, n, m, m1, m2, p, p1;
38 
39 	struct {
40 		int dot_limit;
41 		int p2_slow, p2_fast;
42 	} p2;
43 };
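/*
 * The dot and vco limits are in kHz, the rest are raw divider values.  The
 * basic relationship used by the *_calc_dpll_params() helpers below is
 * vco = refclk * m / n and dot = vco / p (with platform specific encodings
 * of m and n).  p2 is picked from p2_slow/p2_fast: for LVDS based on single
 * vs. dual link, otherwise based on whether the target dot clock is below
 * dot_limit (see i9xx_select_p2_div()).  VLV/CHV/BXT instead treat
 * p2_slow/p2_fast as the min/max of a range that the search walks.
 */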
44 static const struct intel_limit intel_limits_i8xx_dac = {
45 	.dot = { .min = 25000, .max = 350000 },
46 	.vco = { .min = 908000, .max = 1512000 },
47 	.n = { .min = 2, .max = 16 },
48 	.m = { .min = 96, .max = 140 },
49 	.m1 = { .min = 18, .max = 26 },
50 	.m2 = { .min = 6, .max = 16 },
51 	.p = { .min = 4, .max = 128 },
52 	.p1 = { .min = 2, .max = 33 },
53 	.p2 = { .dot_limit = 165000,
54 		.p2_slow = 4, .p2_fast = 2 },
55 };
56 
57 static const struct intel_limit intel_limits_i8xx_dvo = {
58 	.dot = { .min = 25000, .max = 350000 },
59 	.vco = { .min = 908000, .max = 1512000 },
60 	.n = { .min = 2, .max = 16 },
61 	.m = { .min = 96, .max = 140 },
62 	.m1 = { .min = 18, .max = 26 },
63 	.m2 = { .min = 6, .max = 16 },
64 	.p = { .min = 4, .max = 128 },
65 	.p1 = { .min = 2, .max = 33 },
66 	.p2 = { .dot_limit = 165000,
67 		.p2_slow = 4, .p2_fast = 4 },
68 };
69 
70 static const struct intel_limit intel_limits_i8xx_lvds = {
71 	.dot = { .min = 25000, .max = 350000 },
72 	.vco = { .min = 908000, .max = 1512000 },
73 	.n = { .min = 2, .max = 16 },
74 	.m = { .min = 96, .max = 140 },
75 	.m1 = { .min = 18, .max = 26 },
76 	.m2 = { .min = 6, .max = 16 },
77 	.p = { .min = 4, .max = 128 },
78 	.p1 = { .min = 1, .max = 6 },
79 	.p2 = { .dot_limit = 165000,
80 		.p2_slow = 14, .p2_fast = 7 },
81 };
82 
83 static const struct intel_limit intel_limits_i9xx_sdvo = {
84 	.dot = { .min = 20000, .max = 400000 },
85 	.vco = { .min = 1400000, .max = 2800000 },
86 	.n = { .min = 1, .max = 6 },
87 	.m = { .min = 70, .max = 120 },
88 	.m1 = { .min = 8, .max = 18 },
89 	.m2 = { .min = 3, .max = 7 },
90 	.p = { .min = 5, .max = 80 },
91 	.p1 = { .min = 1, .max = 8 },
92 	.p2 = { .dot_limit = 200000,
93 		.p2_slow = 10, .p2_fast = 5 },
94 };
95 
96 static const struct intel_limit intel_limits_i9xx_lvds = {
97 	.dot = { .min = 20000, .max = 400000 },
98 	.vco = { .min = 1400000, .max = 2800000 },
99 	.n = { .min = 1, .max = 6 },
100 	.m = { .min = 70, .max = 120 },
101 	.m1 = { .min = 8, .max = 18 },
102 	.m2 = { .min = 3, .max = 7 },
103 	.p = { .min = 7, .max = 98 },
104 	.p1 = { .min = 1, .max = 8 },
105 	.p2 = { .dot_limit = 112000,
106 		.p2_slow = 14, .p2_fast = 7 },
107 };
108 
109 
110 static const struct intel_limit intel_limits_g4x_sdvo = {
111 	.dot = { .min = 25000, .max = 270000 },
112 	.vco = { .min = 1750000, .max = 3500000},
113 	.n = { .min = 1, .max = 4 },
114 	.m = { .min = 104, .max = 138 },
115 	.m1 = { .min = 17, .max = 23 },
116 	.m2 = { .min = 5, .max = 11 },
117 	.p = { .min = 10, .max = 30 },
118 	.p1 = { .min = 1, .max = 3},
119 	.p2 = { .dot_limit = 270000,
120 		.p2_slow = 10,
121 		.p2_fast = 10
122 	},
123 };
124 
125 static const struct intel_limit intel_limits_g4x_hdmi = {
126 	.dot = { .min = 22000, .max = 400000 },
127 	.vco = { .min = 1750000, .max = 3500000},
128 	.n = { .min = 1, .max = 4 },
129 	.m = { .min = 104, .max = 138 },
130 	.m1 = { .min = 16, .max = 23 },
131 	.m2 = { .min = 5, .max = 11 },
132 	.p = { .min = 5, .max = 80 },
133 	.p1 = { .min = 1, .max = 8},
134 	.p2 = { .dot_limit = 165000,
135 		.p2_slow = 10, .p2_fast = 5 },
136 };
137 
138 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
139 	.dot = { .min = 20000, .max = 115000 },
140 	.vco = { .min = 1750000, .max = 3500000 },
141 	.n = { .min = 1, .max = 3 },
142 	.m = { .min = 104, .max = 138 },
143 	.m1 = { .min = 17, .max = 23 },
144 	.m2 = { .min = 5, .max = 11 },
145 	.p = { .min = 28, .max = 112 },
146 	.p1 = { .min = 2, .max = 8 },
147 	.p2 = { .dot_limit = 0,
148 		.p2_slow = 14, .p2_fast = 14
149 	},
150 };
151 
152 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
153 	.dot = { .min = 80000, .max = 224000 },
154 	.vco = { .min = 1750000, .max = 3500000 },
155 	.n = { .min = 1, .max = 3 },
156 	.m = { .min = 104, .max = 138 },
157 	.m1 = { .min = 17, .max = 23 },
158 	.m2 = { .min = 5, .max = 11 },
159 	.p = { .min = 14, .max = 42 },
160 	.p1 = { .min = 2, .max = 6 },
161 	.p2 = { .dot_limit = 0,
162 		.p2_slow = 7, .p2_fast = 7
163 	},
164 };
165 
166 static const struct intel_limit pnv_limits_sdvo = {
167 	.dot = { .min = 20000, .max = 400000},
168 	.vco = { .min = 1700000, .max = 3500000 },
169 	/* Pineview's Ncounter is a ring counter */
170 	.n = { .min = 3, .max = 6 },
171 	.m = { .min = 2, .max = 256 },
172 	/* Pineview only has one combined m divider, which we treat as m2. */
173 	.m1 = { .min = 0, .max = 0 },
174 	.m2 = { .min = 0, .max = 254 },
175 	.p = { .min = 5, .max = 80 },
176 	.p1 = { .min = 1, .max = 8 },
177 	.p2 = { .dot_limit = 200000,
178 		.p2_slow = 10, .p2_fast = 5 },
179 };
180 
181 static const struct intel_limit pnv_limits_lvds = {
182 	.dot = { .min = 20000, .max = 400000 },
183 	.vco = { .min = 1700000, .max = 3500000 },
184 	.n = { .min = 3, .max = 6 },
185 	.m = { .min = 2, .max = 256 },
186 	.m1 = { .min = 0, .max = 0 },
187 	.m2 = { .min = 0, .max = 254 },
188 	.p = { .min = 7, .max = 112 },
189 	.p1 = { .min = 1, .max = 8 },
190 	.p2 = { .dot_limit = 112000,
191 		.p2_slow = 14, .p2_fast = 14 },
192 };
193 
194 /* Ironlake / Sandybridge
195  *
196  * We calculate clock using (register_value + 2) for N/M1/M2, so here
197  * the range value for them is (actual_value - 2).
198  */
199 static const struct intel_limit ilk_limits_dac = {
200 	.dot = { .min = 25000, .max = 350000 },
201 	.vco = { .min = 1760000, .max = 3510000 },
202 	.n = { .min = 1, .max = 5 },
203 	.m = { .min = 79, .max = 127 },
204 	.m1 = { .min = 12, .max = 22 },
205 	.m2 = { .min = 5, .max = 9 },
206 	.p = { .min = 5, .max = 80 },
207 	.p1 = { .min = 1, .max = 8 },
208 	.p2 = { .dot_limit = 225000,
209 		.p2_slow = 10, .p2_fast = 5 },
210 };
211 
212 static const struct intel_limit ilk_limits_single_lvds = {
213 	.dot = { .min = 25000, .max = 350000 },
214 	.vco = { .min = 1760000, .max = 3510000 },
215 	.n = { .min = 1, .max = 3 },
216 	.m = { .min = 79, .max = 118 },
217 	.m1 = { .min = 12, .max = 22 },
218 	.m2 = { .min = 5, .max = 9 },
219 	.p = { .min = 28, .max = 112 },
220 	.p1 = { .min = 2, .max = 8 },
221 	.p2 = { .dot_limit = 225000,
222 		.p2_slow = 14, .p2_fast = 14 },
223 };
224 
225 static const struct intel_limit ilk_limits_dual_lvds = {
226 	.dot = { .min = 25000, .max = 350000 },
227 	.vco = { .min = 1760000, .max = 3510000 },
228 	.n = { .min = 1, .max = 3 },
229 	.m = { .min = 79, .max = 127 },
230 	.m1 = { .min = 12, .max = 22 },
231 	.m2 = { .min = 5, .max = 9 },
232 	.p = { .min = 14, .max = 56 },
233 	.p1 = { .min = 2, .max = 8 },
234 	.p2 = { .dot_limit = 225000,
235 		.p2_slow = 7, .p2_fast = 7 },
236 };
237 
238 /* LVDS 100 MHz refclk limits. */
239 static const struct intel_limit ilk_limits_single_lvds_100m = {
240 	.dot = { .min = 25000, .max = 350000 },
241 	.vco = { .min = 1760000, .max = 3510000 },
242 	.n = { .min = 1, .max = 2 },
243 	.m = { .min = 79, .max = 126 },
244 	.m1 = { .min = 12, .max = 22 },
245 	.m2 = { .min = 5, .max = 9 },
246 	.p = { .min = 28, .max = 112 },
247 	.p1 = { .min = 2, .max = 8 },
248 	.p2 = { .dot_limit = 225000,
249 		.p2_slow = 14, .p2_fast = 14 },
250 };
251 
252 static const struct intel_limit ilk_limits_dual_lvds_100m = {
253 	.dot = { .min = 25000, .max = 350000 },
254 	.vco = { .min = 1760000, .max = 3510000 },
255 	.n = { .min = 1, .max = 3 },
256 	.m = { .min = 79, .max = 126 },
257 	.m1 = { .min = 12, .max = 22 },
258 	.m2 = { .min = 5, .max = 9 },
259 	.p = { .min = 14, .max = 42 },
260 	.p1 = { .min = 2, .max = 6 },
261 	.p2 = { .dot_limit = 225000,
262 		.p2_slow = 7, .p2_fast = 7 },
263 };
264 
265 static const struct intel_limit intel_limits_vlv = {
266 	 /*
267 	  * These are based on the data rate limits (measured in fast clocks)
268 	  * since those are the strictest limits we have. The fast
269 	  * clock and actual rate limits are more relaxed, so checking
270 	  * them would make no difference.
271 	  */
272 	.dot = { .min = 25000, .max = 270000 },
273 	.vco = { .min = 4000000, .max = 6000000 },
274 	.n = { .min = 1, .max = 7 },
275 	.m1 = { .min = 2, .max = 3 },
276 	.m2 = { .min = 11, .max = 156 },
277 	.p1 = { .min = 2, .max = 3 },
278 	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
279 };
280 
281 static const struct intel_limit intel_limits_chv = {
282 	/*
283 	 * These are based on the data rate limits (measured in fast clocks)
284 	 * since those are the strictest limits we have.  The fast
285 	 * clock and actual rate limits are more relaxed, so checking
286 	 * them would make no difference.
287 	 */
288 	.dot = { .min = 25000, .max = 540000 },
289 	.vco = { .min = 4800000, .max = 6480000 },
290 	.n = { .min = 1, .max = 1 },
291 	.m1 = { .min = 2, .max = 2 },
292 	.m2 = { .min = 24 << 22, .max = 175 << 22 },
293 	.p1 = { .min = 2, .max = 4 },
294 	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
295 };
296 
297 static const struct intel_limit intel_limits_bxt = {
298 	.dot = { .min = 25000, .max = 594000 },
299 	.vco = { .min = 4800000, .max = 6700000 },
300 	.n = { .min = 1, .max = 1 },
301 	.m1 = { .min = 2, .max = 2 },
302 	/* FIXME: find real m2 limits */
303 	.m2 = { .min = 2 << 22, .max = 255 << 22 },
304 	.p1 = { .min = 2, .max = 4 },
305 	.p2 = { .p2_slow = 1, .p2_fast = 20 },
306 };
307 
308 /*
309  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
310  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
311  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
312  * The helpers' return value is the rate of the clock that is fed to the
313  * display engine's pipe which can be the above fast dot clock rate or a
314  * divided-down version of it.
315  */
316 /* m1 is reserved as 0 in Pineview, n is a ring counter */
317 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
318 {
319 	clock->m = clock->m2 + 2;
320 	clock->p = clock->p1 * clock->p2;
321 
322 	clock->vco = clock->n == 0 ? 0 :
323 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
324 	clock->dot = clock->p == 0 ? 0 :
325 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
326 
327 	return clock->dot;
328 }
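/*
 * Note the Pineview differences vs. i9xx_calc_dpll_params() below: there is
 * only one combined M divider (used as m2 + 2) and the N ring counter value
 * is used directly, rather than as n + 2.
 */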
329 
330 static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
331 {
332 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
333 }
334 
335 int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
336 {
337 	clock->m = i9xx_dpll_compute_m(clock);
338 	clock->p = clock->p1 * clock->p2;
339 
340 	clock->vco = clock->n + 2 == 0 ? 0 :
341 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
342 	clock->dot = clock->p == 0 ? 0 :
343 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
344 
345 	return clock->dot;
346 }
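/*
 * Illustrative example (numbers chosen to satisfy intel_limits_i9xx_sdvo,
 * not taken from any particular platform): with refclk = 96000 kHz, n = 1,
 * m1 = 11, m2 = 7, p1 = 4, p2 = 5 we get m = 5 * (11 + 2) + (7 + 2) = 74,
 * vco = 96000 * 74 / (1 + 2) = 2368000 kHz, p = 20, dot = 118400 kHz.
 */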
347 
348 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
349 {
350 	clock->m = clock->m1 * clock->m2;
351 	clock->p = clock->p1 * clock->p2 * 5;
352 
353 	clock->vco = clock->n == 0 ? 0 :
354 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
355 	clock->dot = clock->p == 0 ? 0 :
356 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
357 
358 	return clock->dot;
359 }
360 
361 int chv_calc_dpll_params(int refclk, struct dpll *clock)
362 {
363 	clock->m = clock->m1 * clock->m2;
364 	clock->p = clock->p1 * clock->p2 * 5;
365 
366 	clock->vco = clock->n == 0 ? 0 :
367 		DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), clock->n << 22);
368 	clock->dot = clock->p == 0 ? 0 :
369 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
370 
371 	return clock->dot;
372 }
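/*
 * On CHV the m2 divider carries a 22 bit fractional part (hence the << 22
 * in intel_limits_chv and in chv_crtc_clock_get()); dividing by n << 22
 * above cancels that scaling, so vco is still returned in kHz.
 */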
373 
374 static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
375 {
376 	struct intel_display *display = to_intel_display(crtc_state);
377 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
378 
379 	if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
380 		return display->vbt.lvds_ssc_freq;
381 	else if (HAS_PCH_SPLIT(display))
382 		return 120000;
383 	else if (DISPLAY_VER(display) != 2)
384 		return 96000;
385 	else
386 		return 48000;
387 }
388 
389 void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
390 			    struct intel_dpll_hw_state *dpll_hw_state)
391 {
392 	struct intel_display *display = to_intel_display(crtc);
393 	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
394 
395 	if (DISPLAY_VER(display) >= 4) {
396 		u32 tmp;
397 
398 		/* No way to read it out on pipes B and C */
399 		if (display->platform.cherryview && crtc->pipe != PIPE_A)
400 			tmp = display->state.chv_dpll_md[crtc->pipe];
401 		else
402 			tmp = intel_de_read(display,
403 					    DPLL_MD(display, crtc->pipe));
404 
405 		hw_state->dpll_md = tmp;
406 	}
407 
408 	hw_state->dpll = intel_de_read(display, DPLL(display, crtc->pipe));
409 
410 	if (!display->platform.valleyview && !display->platform.cherryview) {
411 		hw_state->fp0 = intel_de_read(display, FP0(crtc->pipe));
412 		hw_state->fp1 = intel_de_read(display, FP1(crtc->pipe));
413 	} else {
414 		/* Mask out read-only status bits. */
415 		hw_state->dpll &= ~(DPLL_LOCK_VLV |
416 				    DPLL_PORTC_READY_MASK |
417 				    DPLL_PORTB_READY_MASK);
418 	}
419 }
420 
421 /* Returns the clock of the currently programmed mode of the given pipe. */
422 void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
423 {
424 	struct intel_display *display = to_intel_display(crtc_state);
425 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
426 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
427 	u32 dpll = hw_state->dpll;
428 	u32 fp;
429 	struct dpll clock;
430 	int port_clock;
431 	int refclk = i9xx_pll_refclk(crtc_state);
432 
433 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
434 		fp = hw_state->fp0;
435 	else
436 		fp = hw_state->fp1;
437 
438 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
439 	if (display->platform.pineview) {
440 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
441 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
442 	} else {
443 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
444 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
445 	}
446 
447 	if (DISPLAY_VER(display) != 2) {
448 		if (display->platform.pineview)
449 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
450 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
451 		else
452 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
453 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
454 
455 		switch (dpll & DPLL_MODE_MASK) {
456 		case DPLLB_MODE_DAC_SERIAL:
457 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
458 				5 : 10;
459 			break;
460 		case DPLLB_MODE_LVDS:
461 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
462 				7 : 14;
463 			break;
464 		default:
465 			drm_dbg_kms(display->drm,
466 				    "Unknown DPLL mode %08x in programmed "
467 				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
468 			return;
469 		}
470 
471 		if (display->platform.pineview)
472 			port_clock = pnv_calc_dpll_params(refclk, &clock);
473 		else
474 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
475 	} else {
476 		enum pipe lvds_pipe;
477 
478 		if (display->platform.i85x &&
479 		    intel_lvds_port_enabled(display, LVDS, &lvds_pipe) &&
480 		    lvds_pipe == crtc->pipe) {
481 			u32 lvds = intel_de_read(display, LVDS);
482 
483 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
484 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
485 
486 			if (lvds & LVDS_CLKB_POWER_UP)
487 				clock.p2 = 7;
488 			else
489 				clock.p2 = 14;
490 		} else {
491 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
492 				clock.p1 = 2;
493 			else {
494 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
495 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
496 			}
497 			if (dpll & PLL_P2_DIVIDE_BY_4)
498 				clock.p2 = 4;
499 			else
500 				clock.p2 = 2;
501 		}
502 
503 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
504 	}
505 
506 	/*
507 	 * This value includes pixel_multiplier. We will use
508 	 * port_clock to compute adjusted_mode.crtc_clock in the
509 	 * encoder's get_config() function.
510 	 */
511 	crtc_state->port_clock = port_clock;
512 }
513 
514 void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
515 {
516 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
517 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
518 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
519 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
520 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
521 	int refclk = 100000;
522 	struct dpll clock;
523 	u32 tmp;
524 
525 	/* In case of DSI, DPLL will not be used */
526 	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
527 		return;
528 
529 	vlv_dpio_get(dev_priv);
530 	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(ch));
531 	vlv_dpio_put(dev_priv);
532 
533 	clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
534 	clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
535 	clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp);
536 	clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp);
537 	clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp);
538 
539 	crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock);
540 }
541 
542 void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
543 {
544 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
545 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
546 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
547 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
548 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
549 	struct dpll clock;
550 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
551 	int refclk = 100000;
552 
553 	/* In case of DSI, DPLL will not be used */
554 	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
555 		return;
556 
557 	vlv_dpio_get(dev_priv);
558 	cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(ch));
559 	pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(ch));
560 	pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(ch));
561 	pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(ch));
562 	pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
563 	vlv_dpio_put(dev_priv);
564 
565 	clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
566 	clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
567 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
568 		clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2);
569 	clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1);
570 	clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13);
571 	clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13);
572 
573 	crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock);
574 }
575 
576 /*
577  * Returns whether the given set of divisors is valid for a given refclk with
578  * the given connectors.
579  */
580 static bool intel_pll_is_valid(struct intel_display *display,
581 			       const struct intel_limit *limit,
582 			       const struct dpll *clock)
583 {
584 	if (clock->n < limit->n.min || limit->n.max < clock->n)
585 		return false;
586 	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
587 		return false;
588 	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
589 		return false;
590 	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
591 		return false;
592 
593 	if (!display->platform.pineview &&
594 	    !display->platform.valleyview && !display->platform.cherryview &&
595 	    !display->platform.broxton && !display->platform.geminilake)
596 		if (clock->m1 <= clock->m2)
597 			return false;
598 
599 	if (!display->platform.valleyview && !display->platform.cherryview &&
600 	    !display->platform.broxton && !display->platform.geminilake) {
601 		if (clock->p < limit->p.min || limit->p.max < clock->p)
602 			return false;
603 		if (clock->m < limit->m.min || limit->m.max < clock->m)
604 			return false;
605 	}
606 
607 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
608 		return false;
609 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
610 	 * connector, etc., rather than just a single range.
611 	 */
612 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
613 		return false;
614 
615 	return true;
616 }
617 
618 static int
619 i9xx_select_p2_div(const struct intel_limit *limit,
620 		   const struct intel_crtc_state *crtc_state,
621 		   int target)
622 {
623 	struct intel_display *display = to_intel_display(crtc_state);
624 
625 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
626 		/*
627 		 * For LVDS just rely on its current settings for dual-channel.
628 		 * We haven't figured out how to reliably set up different
629 		 * single/dual channel state, if we even can.
630 		 */
631 		if (intel_is_dual_link_lvds(display))
632 			return limit->p2.p2_fast;
633 		else
634 			return limit->p2.p2_slow;
635 	} else {
636 		if (target < limit->p2.dot_limit)
637 			return limit->p2.p2_slow;
638 		else
639 			return limit->p2.p2_fast;
640 	}
641 }
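/*
 * For example, with intel_limits_i9xx_sdvo (dot_limit = 200000,
 * p2_slow = 10, p2_fast = 5) a 150 MHz target selects p2 = 10 and a
 * 250 MHz target selects p2 = 5; for LVDS the choice instead follows
 * the current single vs. dual link configuration.
 */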
642 
643 /*
644  * Returns a set of divisors for the desired target clock with the given
645  * refclk, or FALSE.
646  *
647  * Target and reference clocks are specified in kHz.
648  *
649  * If match_clock is provided, then best_clock P divider must match the P
650  * divider from @match_clock used for LVDS downclocking.
651  */
652 static bool
653 i9xx_find_best_dpll(const struct intel_limit *limit,
654 		    struct intel_crtc_state *crtc_state,
655 		    int target, int refclk,
656 		    const struct dpll *match_clock,
657 		    struct dpll *best_clock)
658 {
659 	struct intel_display *display = to_intel_display(crtc_state);
660 	struct dpll clock;
661 	int err = target;
662 
663 	memset(best_clock, 0, sizeof(*best_clock));
664 
665 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
666 
667 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
668 	     clock.m1++) {
669 		for (clock.m2 = limit->m2.min;
670 		     clock.m2 <= limit->m2.max; clock.m2++) {
671 			if (clock.m2 >= clock.m1)
672 				break;
673 			for (clock.n = limit->n.min;
674 			     clock.n <= limit->n.max; clock.n++) {
675 				for (clock.p1 = limit->p1.min;
676 					clock.p1 <= limit->p1.max; clock.p1++) {
677 					int this_err;
678 
679 					i9xx_calc_dpll_params(refclk, &clock);
680 					if (!intel_pll_is_valid(display,
681 								limit,
682 								&clock))
683 						continue;
684 					if (match_clock &&
685 					    clock.p != match_clock->p)
686 						continue;
687 
688 					this_err = abs(clock.dot - target);
689 					if (this_err < err) {
690 						*best_clock = clock;
691 						err = this_err;
692 					}
693 				}
694 			}
695 		}
696 	}
697 
698 	return (err != target);
699 }
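/*
 * err starts out equal to the target, so the search above returns true only
 * if at least one candidate passed intel_pll_is_valid() and the optional
 * match_clock P constraint (used for LVDS downclocking).
 */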
700 
701 /*
702  * Returns a set of divisors for the desired target clock with the given
703  * refclk, or FALSE.
704  *
705  * Target and reference clocks are specified in kHz.
706  *
707  * If match_clock is provided, then best_clock P divider must match the P
708  * divider from @match_clock used for LVDS downclocking.
709  */
710 static bool
711 pnv_find_best_dpll(const struct intel_limit *limit,
712 		   struct intel_crtc_state *crtc_state,
713 		   int target, int refclk,
714 		   const struct dpll *match_clock,
715 		   struct dpll *best_clock)
716 {
717 	struct intel_display *display = to_intel_display(crtc_state);
718 	struct dpll clock;
719 	int err = target;
720 
721 	memset(best_clock, 0, sizeof(*best_clock));
722 
723 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
724 
725 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
726 	     clock.m1++) {
727 		for (clock.m2 = limit->m2.min;
728 		     clock.m2 <= limit->m2.max; clock.m2++) {
729 			for (clock.n = limit->n.min;
730 			     clock.n <= limit->n.max; clock.n++) {
731 				for (clock.p1 = limit->p1.min;
732 					clock.p1 <= limit->p1.max; clock.p1++) {
733 					int this_err;
734 
735 					pnv_calc_dpll_params(refclk, &clock);
736 					if (!intel_pll_is_valid(display,
737 								limit,
738 								&clock))
739 						continue;
740 					if (match_clock &&
741 					    clock.p != match_clock->p)
742 						continue;
743 
744 					this_err = abs(clock.dot - target);
745 					if (this_err < err) {
746 						*best_clock = clock;
747 						err = this_err;
748 					}
749 				}
750 			}
751 		}
752 	}
753 
754 	return (err != target);
755 }
756 
757 /*
758  * Returns a set of divisors for the desired target clock with the given
759  * refclk, or FALSE.
760  *
761  * Target and reference clocks are specified in kHz.
762  *
763  * If match_clock is provided, then best_clock P divider must match the P
764  * divider from @match_clock used for LVDS downclocking.
765  */
766 static bool
767 g4x_find_best_dpll(const struct intel_limit *limit,
768 		   struct intel_crtc_state *crtc_state,
769 		   int target, int refclk,
770 		   const struct dpll *match_clock,
771 		   struct dpll *best_clock)
772 {
773 	struct intel_display *display = to_intel_display(crtc_state);
774 	struct dpll clock;
775 	int max_n;
776 	bool found = false;
777 	/* approximately equals target * 0.00585 */
778 	int err_most = (target >> 8) + (target >> 9);
779 
780 	memset(best_clock, 0, sizeof(*best_clock));
781 
782 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
783 
784 	max_n = limit->n.max;
785 	/* based on hardware requirement, prefer smaller n for better precision */
786 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
787 		/* based on hardware requirement, prefer larger m1,m2 */
788 		for (clock.m1 = limit->m1.max;
789 		     clock.m1 >= limit->m1.min; clock.m1--) {
790 			for (clock.m2 = limit->m2.max;
791 			     clock.m2 >= limit->m2.min; clock.m2--) {
792 				for (clock.p1 = limit->p1.max;
793 				     clock.p1 >= limit->p1.min; clock.p1--) {
794 					int this_err;
795 
796 					i9xx_calc_dpll_params(refclk, &clock);
797 					if (!intel_pll_is_valid(display,
798 								limit,
799 								&clock))
800 						continue;
801 
802 					this_err = abs(clock.dot - target);
803 					if (this_err < err_most) {
804 						*best_clock = clock;
805 						err_most = this_err;
806 						max_n = clock.n;
807 						found = true;
808 					}
809 				}
810 			}
811 		}
812 	}
813 	return found;
814 }
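/*
 * The search above accepts any candidate within err_most of the target
 * (target / 256 + target / 512, roughly 0.586%) and tightens max_n to the n
 * of the best candidate found so far, so no larger n is considered once a
 * solution exists.
 */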
815 
816 /*
817  * Check whether the calculated PLL configuration is better than the best
818  * one found so far. The calculated error is returned via *error_ppm.
819  */
820 static bool vlv_PLL_is_optimal(struct intel_display *display, int target_freq,
821 			       const struct dpll *calculated_clock,
822 			       const struct dpll *best_clock,
823 			       unsigned int best_error_ppm,
824 			       unsigned int *error_ppm)
825 {
826 	/*
827 	 * For CHV ignore the error and consider only the P value.
828 	 * Prefer a bigger P value based on HW requirements.
829 	 */
830 	if (display->platform.cherryview) {
831 		*error_ppm = 0;
832 
833 		return calculated_clock->p > best_clock->p;
834 	}
835 
836 	if (drm_WARN_ON_ONCE(display->drm, !target_freq))
837 		return false;
838 
839 	*error_ppm = div_u64(1000000ULL *
840 				abs(target_freq - calculated_clock->dot),
841 			     target_freq);
842 	/*
843 	 * Prefer a better P value over a better (smaller) error if the error
844 	 * is small. Ensure this preference for future configurations too by
845 	 * setting the error to 0.
846 	 */
847 	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
848 		*error_ppm = 0;
849 
850 		return true;
851 	}
852 
853 	return *error_ppm + 10 < best_error_ppm;
854 }
855 
856 /*
857  * Returns a set of divisors for the desired target clock with the given
858  * refclk, or FALSE.
859  */
860 static bool
861 vlv_find_best_dpll(const struct intel_limit *limit,
862 		   struct intel_crtc_state *crtc_state,
863 		   int target, int refclk,
864 		   const struct dpll *match_clock,
865 		   struct dpll *best_clock)
866 {
867 	struct intel_display *display = to_intel_display(crtc_state);
868 	struct dpll clock;
869 	unsigned int bestppm = 1000000;
870 	/* min update 19.2 MHz */
871 	int max_n = min(limit->n.max, refclk / 19200);
872 	bool found = false;
873 
874 	memset(best_clock, 0, sizeof(*best_clock));
875 
876 	/* based on hardware requirement, prefer smaller n for better precision */
877 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
878 		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
879 			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
880 			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
881 				clock.p = clock.p1 * clock.p2 * 5;
882 				/* based on hardware requirement, prefer bigger m1,m2 values */
883 				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
884 					unsigned int ppm;
885 
886 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
887 								     refclk * clock.m1);
888 
889 					vlv_calc_dpll_params(refclk, &clock);
890 
891 					if (!intel_pll_is_valid(display,
892 								limit,
893 								&clock))
894 						continue;
895 
896 					if (!vlv_PLL_is_optimal(display, target,
897 								&clock,
898 								best_clock,
899 								bestppm, &ppm))
900 						continue;
901 
902 					*best_clock = clock;
903 					bestppm = ppm;
904 					found = true;
905 				}
906 			}
907 		}
908 	}
909 
910 	return found;
911 }
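/*
 * Rather than brute forcing m2 like the i9xx style searches, the loop above
 * solves dot = refclk * m1 * m2 / (n * p) for m2 directly (with
 * p = p1 * p2 * 5), validates the result, and lets vlv_PLL_is_optimal()
 * trade a small ppm error for a larger P divider.
 */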
912 
913 /*
914  * Returns a set of divisors for the desired target clock with the given
915  * refclk, or FALSE.
916  */
917 static bool
918 chv_find_best_dpll(const struct intel_limit *limit,
919 		   struct intel_crtc_state *crtc_state,
920 		   int target, int refclk,
921 		   const struct dpll *match_clock,
922 		   struct dpll *best_clock)
923 {
924 	struct intel_display *display = to_intel_display(crtc_state);
925 	unsigned int best_error_ppm;
926 	struct dpll clock;
927 	u64 m2;
928 	bool found = false;
929 
930 	memset(best_clock, 0, sizeof(*best_clock));
931 	best_error_ppm = 1000000;
932 
933 	/*
934 	 * Based on the hardware documentation, n is always set to 1 and m1 is
935 	 * always set to 2.  If we ever need to support a 200 MHz refclk, this
936 	 * will have to be revisited because n may no longer be 1.
937 	 */
938 	clock.n = 1;
939 	clock.m1 = 2;
940 
941 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
942 		for (clock.p2 = limit->p2.p2_fast;
943 				clock.p2 >= limit->p2.p2_slow;
944 				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
945 			unsigned int error_ppm;
946 
947 			clock.p = clock.p1 * clock.p2 * 5;
948 
949 			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
950 						   refclk * clock.m1);
951 
952 			if (m2 > INT_MAX/clock.m1)
953 				continue;
954 
955 			clock.m2 = m2;
956 
957 			chv_calc_dpll_params(refclk, &clock);
958 
959 			if (!intel_pll_is_valid(display, limit, &clock))
960 				continue;
961 
962 			if (!vlv_PLL_is_optimal(display, target, &clock, best_clock,
963 						best_error_ppm, &error_ppm))
964 				continue;
965 
966 			*best_clock = clock;
967 			best_error_ppm = error_ppm;
968 			found = true;
969 		}
970 	}
971 
972 	return found;
973 }
974 
975 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
976 			struct dpll *best_clock)
977 {
978 	const struct intel_limit *limit = &intel_limits_bxt;
979 	int refclk = 100000;
980 
981 	return chv_find_best_dpll(limit, crtc_state,
982 				  crtc_state->port_clock, refclk,
983 				  NULL, best_clock);
984 }
985 
986 u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
987 {
988 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
989 }
990 
991 static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
992 {
993 	return (1 << dpll->n) << 16 | dpll->m2;
994 }
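/*
 * The FP0/FP1 value simply packs the raw divider values: n at bit 16, m1 at
 * bit 8 and m2 at bit 0.  Pineview instead stores 1 << n (one-hot, since its
 * N divider is a ring counter) together with its single combined m2 divider,
 * which is why i9xx_crtc_clock_get() decodes the Pineview N field with ffs().
 */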
995 
996 static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state)
997 {
998 	return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
999 }
1000 
1001 static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
1002 		     const struct dpll *clock,
1003 		     const struct dpll *reduced_clock)
1004 {
1005 	struct intel_display *display = to_intel_display(crtc_state);
1006 	u32 dpll;
1007 
1008 	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
1009 
1010 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1011 		dpll |= DPLLB_MODE_LVDS;
1012 	else
1013 		dpll |= DPLLB_MODE_DAC_SERIAL;
1014 
1015 	if (display->platform.i945g || display->platform.i945gm ||
1016 	    display->platform.g33 || display->platform.pineview) {
1017 		dpll |= (crtc_state->pixel_multiplier - 1)
1018 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
1019 	}
1020 
1021 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1022 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1023 		dpll |= DPLL_SDVO_HIGH_SPEED;
1024 
1025 	if (intel_crtc_has_dp_encoder(crtc_state))
1026 		dpll |= DPLL_SDVO_HIGH_SPEED;
1027 
1028 	/* compute bitmask from p1 value */
1029 	if (display->platform.g4x) {
1030 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1031 		dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1032 	} else if (display->platform.pineview) {
1033 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
1034 		WARN_ON(reduced_clock->p1 != clock->p1);
1035 	} else {
1036 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1037 		WARN_ON(reduced_clock->p1 != clock->p1);
1038 	}
1039 
1040 	switch (clock->p2) {
1041 	case 5:
1042 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1043 		break;
1044 	case 7:
1045 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1046 		break;
1047 	case 10:
1048 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1049 		break;
1050 	case 14:
1051 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1052 		break;
1053 	}
1054 	WARN_ON(reduced_clock->p2 != clock->p2);
1055 
1056 	if (DISPLAY_VER(display) >= 4)
1057 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
1058 
1059 	if (crtc_state->sdvo_tv_clock)
1060 		dpll |= PLL_REF_INPUT_TVCLKINBC;
1061 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1062 		 intel_panel_use_ssc(display))
1063 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1064 	else
1065 		dpll |= PLL_REF_INPUT_DREFCLK;
1066 
1067 	return dpll;
1068 }
1069 
1070 static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
1071 			      const struct dpll *clock,
1072 			      const struct dpll *reduced_clock)
1073 {
1074 	struct intel_display *display = to_intel_display(crtc_state);
1075 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1076 
1077 	if (display->platform.pineview) {
1078 		hw_state->fp0 = pnv_dpll_compute_fp(clock);
1079 		hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
1080 	} else {
1081 		hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1082 		hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1083 	}
1084 
1085 	hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
1086 
1087 	if (DISPLAY_VER(display) >= 4)
1088 		hw_state->dpll_md = i965_dpll_md(crtc_state);
1089 }
1090 
1091 static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
1092 		     const struct dpll *clock,
1093 		     const struct dpll *reduced_clock)
1094 {
1095 	struct intel_display *display = to_intel_display(crtc_state);
1096 	u32 dpll;
1097 
1098 	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
1099 
1100 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1101 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1102 	} else {
1103 		if (clock->p1 == 2)
1104 			dpll |= PLL_P1_DIVIDE_BY_TWO;
1105 		else
1106 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1107 		if (clock->p2 == 4)
1108 			dpll |= PLL_P2_DIVIDE_BY_4;
1109 	}
1110 	WARN_ON(reduced_clock->p1 != clock->p1);
1111 	WARN_ON(reduced_clock->p2 != clock->p2);
1112 
1113 	/*
1114 	 * Bspec:
1115 	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
1116 	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
1117 	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
1118 	 *  Enable) must be set to “1” in both the DPLL A Control Register
1119 	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
1120 	 *
1121 	 * For simplicity we simply keep both bits always enabled in
1122 	 * both DPLLs. The spec says we should disable the DVO 2X clock
1123 	 * when not needed, but this seems to work fine in practice.
1124 	 */
1125 	if (display->platform.i830 ||
1126 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
1127 		dpll |= DPLL_DVO_2X_MODE;
1128 
1129 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1130 	    intel_panel_use_ssc(display))
1131 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1132 	else
1133 		dpll |= PLL_REF_INPUT_DREFCLK;
1134 
1135 	return dpll;
1136 }
1137 
1138 static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
1139 			      const struct dpll *clock,
1140 			      const struct dpll *reduced_clock)
1141 {
1142 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1143 
1144 	hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1145 	hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1146 
1147 	hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock);
1148 }
1149 
1150 static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
1151 				  struct intel_crtc *crtc)
1152 {
1153 	struct intel_display *display = to_intel_display(state);
1154 	struct intel_crtc_state *crtc_state =
1155 		intel_atomic_get_new_crtc_state(state, crtc);
1156 	struct intel_encoder *encoder =
1157 		intel_get_crtc_new_encoder(state, crtc_state);
1158 	int ret;
1159 
1160 	if (DISPLAY_VER(display) < 11 &&
1161 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1162 		return 0;
1163 
1164 	ret = intel_compute_shared_dplls(state, crtc, encoder);
1165 	if (ret)
1166 		return ret;
1167 
1168 	/* FIXME this is a mess */
1169 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1170 		return 0;
1171 
1172 	/* CRT dotclock is determined via other means */
1173 	if (!crtc_state->has_pch_encoder)
1174 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1175 
1176 	return 0;
1177 }
1178 
1179 static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
1180 				    struct intel_crtc *crtc)
1181 {
1182 	struct intel_display *display = to_intel_display(state);
1183 	struct intel_crtc_state *crtc_state =
1184 		intel_atomic_get_new_crtc_state(state, crtc);
1185 	struct intel_encoder *encoder =
1186 		intel_get_crtc_new_encoder(state, crtc_state);
1187 
1188 	if (DISPLAY_VER(display) < 11 &&
1189 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1190 		return 0;
1191 
1192 	return intel_reserve_shared_dplls(state, crtc, encoder);
1193 }
1194 
1195 static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
1196 				  struct intel_crtc *crtc)
1197 {
1198 	struct intel_crtc_state *crtc_state =
1199 		intel_atomic_get_new_crtc_state(state, crtc);
1200 	struct intel_encoder *encoder =
1201 		intel_get_crtc_new_encoder(state, crtc_state);
1202 	int ret;
1203 
1204 	ret = intel_mpllb_calc_state(crtc_state, encoder);
1205 	if (ret)
1206 		return ret;
1207 
1208 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1209 
1210 	return 0;
1211 }
1212 
1213 static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
1214 				  struct intel_crtc *crtc)
1215 {
1216 	struct intel_crtc_state *crtc_state =
1217 		intel_atomic_get_new_crtc_state(state, crtc);
1218 	struct intel_encoder *encoder =
1219 		intel_get_crtc_new_encoder(state, crtc_state);
1220 	int ret;
1221 
1222 	ret = intel_cx0pll_calc_state(crtc_state, encoder);
1223 	if (ret)
1224 		return ret;
1225 
1226 	/* TODO: Do the readback via intel_compute_shared_dplls() */
1227 	crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
1228 
1229 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1230 
1231 	return 0;
1232 }
1233 
1234 static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
1235 {
1236 	struct intel_display *display = to_intel_display(crtc_state);
1237 
1238 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1239 	    ((intel_panel_use_ssc(display) && display->vbt.lvds_ssc_freq == 100000) ||
1240 	     (HAS_PCH_IBX(display) && intel_is_dual_link_lvds(display))))
1241 		return 25;
1242 
1243 	if (crtc_state->sdvo_tv_clock)
1244 		return 20;
1245 
1246 	return 21;
1247 }
1248 
1249 static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
1250 {
1251 	return dpll->m < factor * dpll->n;
1252 }
1253 
1254 static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor)
1255 {
1256 	u32 fp;
1257 
1258 	fp = i9xx_dpll_compute_fp(clock);
1259 	if (ilk_needs_fb_cb_tune(clock, factor))
1260 		fp |= FP_CB_TUNE;
1261 
1262 	return fp;
1263 }
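/*
 * FP_CB_TUNE is set whenever the feedback ratio m/n falls below the factor
 * from ilk_fb_cb_factor(): 25 for LVDS with a 100 MHz SSC reference or IBX
 * dual link LVDS, 20 for SDVO TV clocks, 21 otherwise.
 */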
1264 
1265 static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
1266 		    const struct dpll *clock,
1267 		    const struct dpll *reduced_clock)
1268 {
1269 	struct intel_display *display = to_intel_display(crtc_state);
1270 	u32 dpll;
1271 
1272 	dpll = DPLL_VCO_ENABLE;
1273 
1274 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1275 		dpll |= DPLLB_MODE_LVDS;
1276 	else
1277 		dpll |= DPLLB_MODE_DAC_SERIAL;
1278 
1279 	dpll |= (crtc_state->pixel_multiplier - 1)
1280 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1281 
1282 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1283 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1284 		dpll |= DPLL_SDVO_HIGH_SPEED;
1285 
1286 	if (intel_crtc_has_dp_encoder(crtc_state))
1287 		dpll |= DPLL_SDVO_HIGH_SPEED;
1288 
1289 	/*
1290 	 * The high speed IO clock is only really required for
1291 	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
1292 	 * possible to share the DPLL between CRT and HDMI. Enabling
1293 	 * the clock needlessly does no real harm, except use up a
1294 	 * bit of power potentially.
1295 	 *
1296 	 * We'll limit this to IVB with 3 pipes, since it has only two
1297 	 * DPLLs and so DPLL sharing is the only way to get three pipes
1298 	 * driving PCH ports at the same time. On SNB we could do this,
1299 	 * and potentially avoid enabling the second DPLL, but it's not
1300 	 * clear if it's a win or loss power-wise. No point in doing
1301 	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
1302 	 */
1303 	if (INTEL_NUM_PIPES(display) == 3 &&
1304 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1305 		dpll |= DPLL_SDVO_HIGH_SPEED;
1306 
1307 	/* compute bitmask from p1 value */
1308 	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1309 	/* also FPA1 */
1310 	dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1311 
1312 	switch (clock->p2) {
1313 	case 5:
1314 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1315 		break;
1316 	case 7:
1317 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1318 		break;
1319 	case 10:
1320 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1321 		break;
1322 	case 14:
1323 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1324 		break;
1325 	}
1326 	WARN_ON(reduced_clock->p2 != clock->p2);
1327 
1328 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1329 	    intel_panel_use_ssc(display))
1330 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1331 	else
1332 		dpll |= PLL_REF_INPUT_DREFCLK;
1333 
1334 	return dpll;
1335 }
1336 
1337 static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
1338 			     const struct dpll *clock,
1339 			     const struct dpll *reduced_clock)
1340 {
1341 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1342 	int factor = ilk_fb_cb_factor(crtc_state);
1343 
1344 	hw_state->fp0 = ilk_dpll_compute_fp(clock, factor);
1345 	hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor);
1346 
1347 	hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock);
1348 }
1349 
1350 static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
1351 				  struct intel_crtc *crtc)
1352 {
1353 	struct intel_display *display = to_intel_display(state);
1354 	struct intel_crtc_state *crtc_state =
1355 		intel_atomic_get_new_crtc_state(state, crtc);
1356 	const struct intel_limit *limit;
1357 	int refclk = 120000;
1358 	int ret;
1359 
1360 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1361 	if (!crtc_state->has_pch_encoder)
1362 		return 0;
1363 
1364 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1365 		if (intel_panel_use_ssc(display)) {
1366 			drm_dbg_kms(display->drm,
1367 				    "using SSC reference clock of %d kHz\n",
1368 				    display->vbt.lvds_ssc_freq);
1369 			refclk = display->vbt.lvds_ssc_freq;
1370 		}
1371 
1372 		if (intel_is_dual_link_lvds(display)) {
1373 			if (refclk == 100000)
1374 				limit = &ilk_limits_dual_lvds_100m;
1375 			else
1376 				limit = &ilk_limits_dual_lvds;
1377 		} else {
1378 			if (refclk == 100000)
1379 				limit = &ilk_limits_single_lvds_100m;
1380 			else
1381 				limit = &ilk_limits_single_lvds;
1382 		}
1383 	} else {
1384 		limit = &ilk_limits_dac;
1385 	}
1386 
1387 	if (!crtc_state->clock_set &&
1388 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1389 				refclk, NULL, &crtc_state->dpll))
1390 		return -EINVAL;
1391 
1392 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1393 
1394 	ilk_compute_dpll(crtc_state, &crtc_state->dpll,
1395 			 &crtc_state->dpll);
1396 
1397 	ret = intel_compute_shared_dplls(state, crtc, NULL);
1398 	if (ret)
1399 		return ret;
1400 
1401 	crtc_state->port_clock = crtc_state->dpll.dot;
1402 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1403 
1404 	return ret;
1405 }
1406 
1407 static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
1408 				    struct intel_crtc *crtc)
1409 {
1410 	struct intel_crtc_state *crtc_state =
1411 		intel_atomic_get_new_crtc_state(state, crtc);
1412 
1413 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1414 	if (!crtc_state->has_pch_encoder)
1415 		return 0;
1416 
1417 	return intel_reserve_shared_dplls(state, crtc, NULL);
1418 }
1419 
1420 static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
1421 {
1422 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1423 	u32 dpll;
1424 
1425 	dpll = DPLL_INTEGRATED_REF_CLK_VLV |
1426 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1427 
1428 	if (crtc->pipe != PIPE_A)
1429 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1430 
1431 	/* DPLL not used with DSI, but still need the rest set up */
1432 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1433 		dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
1434 
1435 	return dpll;
1436 }
1437 
1438 void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
1439 {
1440 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1441 
1442 	hw_state->dpll = vlv_dpll(crtc_state);
1443 	hw_state->dpll_md = i965_dpll_md(crtc_state);
1444 }
1445 
1446 static u32 chv_dpll(const struct intel_crtc_state *crtc_state)
1447 {
1448 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1449 	u32 dpll;
1450 
1451 	dpll = DPLL_SSC_REF_CLK_CHV |
1452 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1453 
1454 	if (crtc->pipe != PIPE_A)
1455 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1456 
1457 	/* DPLL not used with DSI, but still need the rest set up */
1458 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1459 		dpll |= DPLL_VCO_ENABLE;
1460 
1461 	return dpll;
1462 }
1463 
1464 void chv_compute_dpll(struct intel_crtc_state *crtc_state)
1465 {
1466 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1467 
1468 	hw_state->dpll = chv_dpll(crtc_state);
1469 	hw_state->dpll_md = i965_dpll_md(crtc_state);
1470 }
1471 
1472 static int chv_crtc_compute_clock(struct intel_atomic_state *state,
1473 				  struct intel_crtc *crtc)
1474 {
1475 	struct intel_crtc_state *crtc_state =
1476 		intel_atomic_get_new_crtc_state(state, crtc);
1477 	const struct intel_limit *limit = &intel_limits_chv;
1478 	int refclk = 100000;
1479 
1480 	if (!crtc_state->clock_set &&
1481 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1482 				refclk, NULL, &crtc_state->dpll))
1483 		return -EINVAL;
1484 
1485 	chv_calc_dpll_params(refclk, &crtc_state->dpll);
1486 
1487 	chv_compute_dpll(crtc_state);
1488 
1489 	/* FIXME this is a mess */
1490 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1491 		return 0;
1492 
1493 	crtc_state->port_clock = crtc_state->dpll.dot;
1494 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1495 
1496 	return 0;
1497 }
1498 
1499 static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
1500 				  struct intel_crtc *crtc)
1501 {
1502 	struct intel_crtc_state *crtc_state =
1503 		intel_atomic_get_new_crtc_state(state, crtc);
1504 	const struct intel_limit *limit = &intel_limits_vlv;
1505 	int refclk = 100000;
1506 
1507 	if (!crtc_state->clock_set &&
1508 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1509 				refclk, NULL, &crtc_state->dpll))
1510 		return -EINVAL;
1511 
1512 	vlv_calc_dpll_params(refclk, &crtc_state->dpll);
1513 
1514 	vlv_compute_dpll(crtc_state);
1515 
1516 	/* FIXME this is a mess */
1517 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1518 		return 0;
1519 
1520 	crtc_state->port_clock = crtc_state->dpll.dot;
1521 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1522 
1523 	return 0;
1524 }
1525 
1526 static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
1527 				  struct intel_crtc *crtc)
1528 {
1529 	struct intel_display *display = to_intel_display(state);
1530 	struct intel_crtc_state *crtc_state =
1531 		intel_atomic_get_new_crtc_state(state, crtc);
1532 	const struct intel_limit *limit;
1533 	int refclk = 96000;
1534 
1535 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1536 		if (intel_panel_use_ssc(display)) {
1537 			refclk = display->vbt.lvds_ssc_freq;
1538 			drm_dbg_kms(display->drm,
1539 				    "using SSC reference clock of %d kHz\n",
1540 				    refclk);
1541 		}
1542 
1543 		if (intel_is_dual_link_lvds(display))
1544 			limit = &intel_limits_g4x_dual_channel_lvds;
1545 		else
1546 			limit = &intel_limits_g4x_single_channel_lvds;
1547 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
1548 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
1549 		limit = &intel_limits_g4x_hdmi;
1550 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
1551 		limit = &intel_limits_g4x_sdvo;
1552 	} else {
1553 		/* The i9xx SDVO limits are used for all other outputs. */
1554 		limit = &intel_limits_i9xx_sdvo;
1555 	}
1556 
1557 	if (!crtc_state->clock_set &&
1558 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1559 				refclk, NULL, &crtc_state->dpll))
1560 		return -EINVAL;
1561 
1562 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1563 
1564 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1565 			  &crtc_state->dpll);
1566 
1567 	crtc_state->port_clock = crtc_state->dpll.dot;
1568 	/* FIXME this is a mess */
1569 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1570 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1571 
1572 	return 0;
1573 }
1574 
1575 static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
1576 				  struct intel_crtc *crtc)
1577 {
1578 	struct intel_display *display = to_intel_display(state);
1579 	struct intel_crtc_state *crtc_state =
1580 		intel_atomic_get_new_crtc_state(state, crtc);
1581 	const struct intel_limit *limit;
1582 	int refclk = 96000;
1583 
1584 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1585 		if (intel_panel_use_ssc(display)) {
1586 			refclk = display->vbt.lvds_ssc_freq;
1587 			drm_dbg_kms(display->drm,
1588 				    "using SSC reference clock of %d kHz\n",
1589 				    refclk);
1590 		}
1591 
1592 		limit = &pnv_limits_lvds;
1593 	} else {
1594 		limit = &pnv_limits_sdvo;
1595 	}
1596 
1597 	if (!crtc_state->clock_set &&
1598 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1599 				refclk, NULL, &crtc_state->dpll))
1600 		return -EINVAL;
1601 
1602 	pnv_calc_dpll_params(refclk, &crtc_state->dpll);
1603 
1604 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1605 			  &crtc_state->dpll);
1606 
1607 	crtc_state->port_clock = crtc_state->dpll.dot;
1608 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1609 
1610 	return 0;
1611 }
1612 
1613 static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
1614 				   struct intel_crtc *crtc)
1615 {
1616 	struct intel_display *display = to_intel_display(state);
1617 	struct intel_crtc_state *crtc_state =
1618 		intel_atomic_get_new_crtc_state(state, crtc);
1619 	const struct intel_limit *limit;
1620 	int refclk = 96000;
1621 
1622 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1623 		if (intel_panel_use_ssc(display)) {
1624 			refclk = display->vbt.lvds_ssc_freq;
1625 			drm_dbg_kms(display->drm,
1626 				    "using SSC reference clock of %d kHz\n",
1627 				    refclk);
1628 		}
1629 
1630 		limit = &intel_limits_i9xx_lvds;
1631 	} else {
1632 		limit = &intel_limits_i9xx_sdvo;
1633 	}
1634 
1635 	if (!crtc_state->clock_set &&
1636 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1637 				 refclk, NULL, &crtc_state->dpll))
1638 		return -EINVAL;
1639 
1640 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1641 
1642 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1643 			  &crtc_state->dpll);
1644 
1645 	crtc_state->port_clock = crtc_state->dpll.dot;
1646 	/* FIXME this is a mess */
1647 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1648 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1649 
1650 	return 0;
1651 }
1652 
1653 static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
1654 				   struct intel_crtc *crtc)
1655 {
1656 	struct intel_display *display = to_intel_display(state);
1657 	struct intel_crtc_state *crtc_state =
1658 		intel_atomic_get_new_crtc_state(state, crtc);
1659 	const struct intel_limit *limit;
1660 	int refclk = 48000;
1661 
1662 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1663 		if (intel_panel_use_ssc(display)) {
1664 			refclk = display->vbt.lvds_ssc_freq;
1665 			drm_dbg_kms(display->drm,
1666 				    "using SSC reference clock of %d kHz\n",
1667 				    refclk);
1668 		}
1669 
1670 		limit = &intel_limits_i8xx_lvds;
1671 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
1672 		limit = &intel_limits_i8xx_dvo;
1673 	} else {
1674 		limit = &intel_limits_i8xx_dac;
1675 	}
1676 
1677 	if (!crtc_state->clock_set &&
1678 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1679 				 refclk, NULL, &crtc_state->dpll))
1680 		return -EINVAL;
1681 
1682 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1683 
1684 	i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
1685 			  &crtc_state->dpll);
1686 
1687 	crtc_state->port_clock = crtc_state->dpll.dot;
1688 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1689 
1690 	return 0;
1691 }
1692 
1693 static const struct intel_dpll_funcs mtl_dpll_funcs = {
1694 	.crtc_compute_clock = mtl_crtc_compute_clock,
1695 };
1696 
1697 static const struct intel_dpll_funcs dg2_dpll_funcs = {
1698 	.crtc_compute_clock = dg2_crtc_compute_clock,
1699 };
1700 
1701 static const struct intel_dpll_funcs hsw_dpll_funcs = {
1702 	.crtc_compute_clock = hsw_crtc_compute_clock,
1703 	.crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
1704 };
1705 
1706 static const struct intel_dpll_funcs ilk_dpll_funcs = {
1707 	.crtc_compute_clock = ilk_crtc_compute_clock,
1708 	.crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
1709 };
1710 
1711 static const struct intel_dpll_funcs chv_dpll_funcs = {
1712 	.crtc_compute_clock = chv_crtc_compute_clock,
1713 };
1714 
1715 static const struct intel_dpll_funcs vlv_dpll_funcs = {
1716 	.crtc_compute_clock = vlv_crtc_compute_clock,
1717 };
1718 
1719 static const struct intel_dpll_funcs g4x_dpll_funcs = {
1720 	.crtc_compute_clock = g4x_crtc_compute_clock,
1721 };
1722 
1723 static const struct intel_dpll_funcs pnv_dpll_funcs = {
1724 	.crtc_compute_clock = pnv_crtc_compute_clock,
1725 };
1726 
1727 static const struct intel_dpll_funcs i9xx_dpll_funcs = {
1728 	.crtc_compute_clock = i9xx_crtc_compute_clock,
1729 };
1730 
1731 static const struct intel_dpll_funcs i8xx_dpll_funcs = {
1732 	.crtc_compute_clock = i8xx_crtc_compute_clock,
1733 };
1734 
1735 int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
1736 				  struct intel_crtc *crtc)
1737 {
1738 	struct intel_display *display = to_intel_display(state);
1739 	struct intel_crtc_state *crtc_state =
1740 		intel_atomic_get_new_crtc_state(state, crtc);
1741 	int ret;
1742 
1743 	drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
1744 
1745 	memset(&crtc_state->dpll_hw_state, 0,
1746 	       sizeof(crtc_state->dpll_hw_state));
1747 
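	/* Nothing to compute if the crtc is not going to be enabled */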
1748 	if (!crtc_state->hw.enable)
1749 		return 0;
1750 
1751 	ret = display->funcs.dpll->crtc_compute_clock(state, crtc);
1752 	if (ret) {
1753 		drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
1754 			    crtc->base.base.id, crtc->base.name);
1755 		return ret;
1756 	}
1757 
1758 	return 0;
1759 }
1760 
1761 int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
1762 				    struct intel_crtc *crtc)
1763 {
1764 	struct intel_display *display = to_intel_display(state);
1765 	struct intel_crtc_state *crtc_state =
1766 		intel_atomic_get_new_crtc_state(state, crtc);
1767 	int ret;
1768 
1769 	drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
1770 	drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
1771 
1772 	if (!crtc_state->hw.enable || crtc_state->shared_dpll)
1773 		return 0;
1774 
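	/* Not every platform uses shared DPLLs; skip when no hook is provided */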
1775 	if (!display->funcs.dpll->crtc_get_shared_dpll)
1776 		return 0;
1777 
1778 	ret = display->funcs.dpll->crtc_get_shared_dpll(state, crtc);
1779 	if (ret) {
1780 		drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
1781 			    crtc->base.base.id, crtc->base.name);
1782 		return ret;
1783 	}
1784 
1785 	return 0;
1786 }
1787 
1788 void
1789 intel_dpll_init_clock_hook(struct intel_display *display)
1790 {
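	/* Pick the clock computation hooks, checking the newest platforms first */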
1791 	if (DISPLAY_VER(display) >= 14)
1792 		display->funcs.dpll = &mtl_dpll_funcs;
1793 	else if (display->platform.dg2)
1794 		display->funcs.dpll = &dg2_dpll_funcs;
1795 	else if (DISPLAY_VER(display) >= 9 || HAS_DDI(display))
1796 		display->funcs.dpll = &hsw_dpll_funcs;
1797 	else if (HAS_PCH_SPLIT(display))
1798 		display->funcs.dpll = &ilk_dpll_funcs;
1799 	else if (display->platform.cherryview)
1800 		display->funcs.dpll = &chv_dpll_funcs;
1801 	else if (display->platform.valleyview)
1802 		display->funcs.dpll = &vlv_dpll_funcs;
1803 	else if (display->platform.g4x)
1804 		display->funcs.dpll = &g4x_dpll_funcs;
1805 	else if (display->platform.pineview)
1806 		display->funcs.dpll = &pnv_dpll_funcs;
1807 	else if (DISPLAY_VER(display) != 2)
1808 		display->funcs.dpll = &i9xx_dpll_funcs;
1809 	else
1810 		display->funcs.dpll = &i8xx_dpll_funcs;
1811 }
1812 
1813 static bool i9xx_has_pps(struct intel_display *display)
1814 {
1815 	if (display->platform.i830)
1816 		return false;
1817 
1818 	return display->platform.pineview || display->platform.mobile;
1819 }
1820 
1821 void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
1822 {
1823 	struct intel_display *display = to_intel_display(crtc_state);
1824 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1825 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1826 	enum pipe pipe = crtc->pipe;
1827 	int i;
1828 
1829 	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
1830 
1831 	/* PLL is protected by panel, make sure we can write it */
1832 	if (i9xx_has_pps(display))
1833 		assert_pps_unlocked(display, pipe);
1834 
1835 	intel_de_write(display, FP0(pipe), hw_state->fp0);
1836 	intel_de_write(display, FP1(pipe), hw_state->fp1);
1837 
1838 	/*
1839 	 * Apparently we need to have VGA mode enabled prior to changing
1840 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1841 	 * dividers, even though the register value does change.
1842 	 */
1843 	intel_de_write(display, DPLL(display, pipe),
1844 		       hw_state->dpll & ~DPLL_VGA_MODE_DIS);
1845 	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1846 
1847 	/* Wait for the clocks to stabilize. */
1848 	intel_de_posting_read(display, DPLL(display, pipe));
1849 	udelay(150);
1850 
1851 	if (DISPLAY_VER(display) >= 4) {
1852 		intel_de_write(display, DPLL_MD(display, pipe),
1853 			       hw_state->dpll_md);
1854 	} else {
1855 		/* The pixel multiplier can only be updated once the
1856 		 * DPLL is enabled and the clocks are stable.
1857 		 *
1858 		 * So write it again.
1859 		 */
1860 		intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1861 	}
1862 
1863 	/* We do this three times for luck */
1864 	for (i = 0; i < 3; i++) {
1865 		intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1866 		intel_de_posting_read(display, DPLL(display, pipe));
1867 		udelay(150); /* wait for warmup */
1868 	}
1869 }
1870 
1871 static void vlv_pllb_recal_opamp(struct intel_display *display,
1872 				 enum dpio_phy phy, enum dpio_channel ch)
1873 {
1874 	struct drm_i915_private *dev_priv = to_i915(display->drm);
1875 	u32 tmp;
1876 
1877 	/*
1878 	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
1879 	 * and set it to a reasonable value instead.
1880 	 */
1881 	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
1882 	tmp &= 0xffffff00;
1883 	tmp |= 0x00000030;
1884 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
1885 
1886 	tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
1887 	tmp &= 0x00ffffff;
1888 	tmp |= 0x8c000000;
1889 	vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
1890 
1891 	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
1892 	tmp &= 0xffffff00;
1893 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
1894 
1895 	tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
1896 	tmp &= 0x00ffffff;
1897 	tmp |= 0xb0000000;
1898 	vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
1899 }
1900 
1901 static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
1902 {
1903 	struct intel_display *display = to_intel_display(crtc_state);
1904 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1905 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1906 	const struct dpll *clock = &crtc_state->dpll;
1907 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
1908 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
1909 	enum pipe pipe = crtc->pipe;
1910 	u32 tmp, coreclk;
1911 
1912 	vlv_dpio_get(dev_priv);
1913 
1914 	/* See eDP HDMI DPIO driver vbios notes doc */
1915 
1916 	/* PLL B needs special handling */
1917 	if (pipe == PIPE_B)
1918 		vlv_pllb_recal_opamp(display, phy, ch);
1919 
1920 	/* Set up Tx target for periodic Rcomp update */
1921 	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
1922 
1923 	/* Disable target IRef on PLL */
1924 	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW16(ch));
1925 	tmp &= 0x00ffffff;
1926 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW16(ch), tmp);
1927 
1928 	/* Disable fast lock */
1929 	vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
1930 
1931 	/* Set idtafcrecal before PLL is enabled */
1932 	tmp = DPIO_M1_DIV(clock->m1) |
1933 		DPIO_M2_DIV(clock->m2) |
1934 		DPIO_P1_DIV(clock->p1) |
1935 		DPIO_P2_DIV(clock->p2) |
1936 		DPIO_N_DIV(clock->n) |
1937 		DPIO_K_DIV(1);
1938 
1939 	/*
1940 	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
1941 	 * but we don't support that).
1942 	 * Note: don't use the DAC post divider as it seems unstable.
1943 	 */
1944 	tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
1945 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
1946 
1947 	tmp |= DPIO_ENABLE_CALIBRATION;
1948 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
1949 
1950 	/* Set HBR and RBR LPF coefficients */
1951 	if (crtc_state->port_clock == 162000 ||
1952 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
1953 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1954 		vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
1955 				 0x009f0003);
1956 	else
1957 		vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
1958 				 0x00d0000f);
1959 
1960 	if (intel_crtc_has_dp_encoder(crtc_state)) {
1961 		/* Use SSC source */
1962 		if (pipe == PIPE_A)
1963 			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1964 					 0x0df40000);
1965 		else
1966 			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1967 					 0x0df70000);
1968 	} else { /* HDMI or VGA */
1969 		/* Use bend source */
1970 		if (pipe == PIPE_A)
1971 			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1972 					 0x0df70000);
1973 		else
1974 			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1975 					 0x0df40000);
1976 	}
1977 
1978 	coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(ch));
1979 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
1980 	if (intel_crtc_has_dp_encoder(crtc_state))
1981 		coreclk |= 0x01000000;
1982 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(ch), coreclk);
1983 
1984 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW19(ch), 0x87871000);
1985 
1986 	vlv_dpio_put(dev_priv);
1987 }
1988 
1989 static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
1990 {
1991 	struct intel_display *display = to_intel_display(crtc_state);
1992 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1993 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1994 	enum pipe pipe = crtc->pipe;
1995 
1996 	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1997 	intel_de_posting_read(display, DPLL(display, pipe));
1998 	udelay(150);
1999 
2000 	if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
2001 		drm_err(display->drm, "DPLL %d failed to lock\n", pipe);
2002 }
2003 
2004 void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
2005 {
2006 	struct intel_display *display = to_intel_display(crtc_state);
2007 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2008 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2009 	enum pipe pipe = crtc->pipe;
2010 
2011 	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
2012 
2013 	/* PLL is protected by panel, make sure we can write it */
2014 	assert_pps_unlocked(display, pipe);
2015 
2016 	/* Enable Refclk */
2017 	intel_de_write(display, DPLL(display, pipe),
2018 		       hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
2019 
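	/* Only program the DPIO phy and lock the PLL if the VCO is supposed to run */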
2020 	if (hw_state->dpll & DPLL_VCO_ENABLE) {
2021 		vlv_prepare_pll(crtc_state);
2022 		_vlv_enable_pll(crtc_state);
2023 	}
2024 
2025 	intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md);
2026 	intel_de_posting_read(display, DPLL_MD(display, pipe));
2027 }
2028 
2029 static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
2030 {
2031 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2032 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2033 	const struct dpll *clock = &crtc_state->dpll;
2034 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
2035 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
2036 	u32 tmp, loopfilter, tribuf_calcntr;
2037 	u32 m2_frac;
2038 
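	/* m2 carries the fractional part in its low 22 bits, the integer part above */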
2039 	m2_frac = clock->m2 & 0x3fffff;
2040 
2041 	vlv_dpio_get(dev_priv);
2042 
2043 	/* p1 and p2 divider */
2044 	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(ch),
2045 		       DPIO_CHV_S1_DIV(5) |
2046 		       DPIO_CHV_P1_DIV(clock->p1) |
2047 		       DPIO_CHV_P2_DIV(clock->p2) |
2048 		       DPIO_CHV_K_DIV(1));
2049 
2050 	/* Feedback post-divider - m2 */
2051 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(ch),
2052 		       DPIO_CHV_M2_DIV(clock->m2 >> 22));
2053 
2054 	/* Feedback refclk divider - n and m1 */
2055 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(ch),
2056 		       DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
2057 		       DPIO_CHV_N_DIV(1));
2058 
2059 	/* M2 fraction division */
2060 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(ch),
2061 		       DPIO_CHV_M2_FRAC_DIV(m2_frac));
2062 
2063 	/* M2 fraction division enable */
2064 	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
2065 	tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
2066 	tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
2067 	if (m2_frac)
2068 		tmp |= DPIO_CHV_FRAC_DIV_EN;
2069 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(ch), tmp);
2070 
2071 	/* Program digital lock detect threshold */
2072 	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(ch));
2073 	tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
2074 		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
2075 	tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
2076 	if (!m2_frac)
2077 		tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
2078 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(ch), tmp);
2079 
2080 	/* Loop filter */
2081 	if (clock->vco == 5400000) {
2082 		loopfilter = DPIO_CHV_PROP_COEFF(0x3) |
2083 			DPIO_CHV_INT_COEFF(0x8) |
2084 			DPIO_CHV_GAIN_CTRL(0x1);
2085 		tribuf_calcntr = 0x9;
2086 	} else if (clock->vco <= 6200000) {
2087 		loopfilter = DPIO_CHV_PROP_COEFF(0x5) |
2088 			DPIO_CHV_INT_COEFF(0xB) |
2089 			DPIO_CHV_GAIN_CTRL(0x3);
2090 		tribuf_calcntr = 0x9;
2091 	} else if (clock->vco <= 6480000) {
2092 		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
2093 			DPIO_CHV_INT_COEFF(0x9) |
2094 			DPIO_CHV_GAIN_CTRL(0x3);
2095 		tribuf_calcntr = 0x8;
2096 	} else {
2097 		/* Not supported. Apply the same limits as in the max case */
2098 		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
2099 			DPIO_CHV_INT_COEFF(0x9) |
2100 			DPIO_CHV_GAIN_CTRL(0x3);
2101 		tribuf_calcntr = 0;
2102 	}
2103 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(ch), loopfilter);
2104 
2105 	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(ch));
2106 	tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
2107 	tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
2108 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(ch), tmp);
2109 
2110 	/* AFC Recal */
2111 	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch),
2112 		       vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)) |
2113 		       DPIO_AFC_RECAL);
2114 
2115 	vlv_dpio_put(dev_priv);
2116 }
2117 
2118 static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
2119 {
2120 	struct intel_display *display = to_intel_display(crtc_state);
2121 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2122 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2123 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2124 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
2125 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
2126 	enum pipe pipe = crtc->pipe;
2127 	u32 tmp;
2128 
2129 	vlv_dpio_get(dev_priv);
2130 
2131 	/* Re-enable the 10bit clock to the display controller */
2132 	tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
2133 	tmp |= DPIO_DCLKP_EN;
2134 	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), tmp);
2135 
2136 	vlv_dpio_put(dev_priv);
2137 
2138 	/*
2139 	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
2140 	 */
2141 	udelay(1);
2142 
2143 	/* Enable PLL */
2144 	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
2145 
2146 	/* Check PLL is locked */
2147 	if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
2148 		drm_err(display->drm, "PLL %d failed to lock\n", pipe);
2149 }
2150 
2151 void chv_enable_pll(const struct intel_crtc_state *crtc_state)
2152 {
2153 	struct intel_display *display = to_intel_display(crtc_state);
2154 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2155 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2156 	enum pipe pipe = crtc->pipe;
2157 
2158 	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
2159 
2160 	/* PLL is protected by panel, make sure we can write it */
2161 	assert_pps_unlocked(display, pipe);
2162 
2163 	/* Enable Refclk and SSC */
2164 	intel_de_write(display, DPLL(display, pipe),
2165 		       hw_state->dpll & ~DPLL_VCO_ENABLE);
2166 
2167 	if (hw_state->dpll & DPLL_VCO_ENABLE) {
2168 		chv_prepare_pll(crtc_state);
2169 		_chv_enable_pll(crtc_state);
2170 	}
2171 
2172 	if (pipe != PIPE_A) {
2173 		/*
2174 		 * WaPixelRepeatModeFixForC0:chv
2175 		 *
2176 		 * DPLLCMD is AWOL. Use chicken bits to propagate
2177 		 * the value from DPLLBMD to either pipe B or C.
2178 		 */
2179 		intel_de_write(display, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
2180 		intel_de_write(display, DPLL_MD(display, PIPE_B),
2181 			       hw_state->dpll_md);
2182 		intel_de_write(display, CBR4_VLV, 0);
2183 		display->state.chv_dpll_md[pipe] = hw_state->dpll_md;
2184 
2185 		/*
2186 		 * DPLLB VGA mode also seems to cause problems.
2187 		 * We should always have it disabled.
2188 		 */
2189 		drm_WARN_ON(display->drm,
2190 			    (intel_de_read(display, DPLL(display, PIPE_B)) &
2191 			     DPLL_VGA_MODE_DIS) == 0);
2192 	} else {
2193 		intel_de_write(display, DPLL_MD(display, pipe),
2194 			       hw_state->dpll_md);
2195 		intel_de_posting_read(display, DPLL_MD(display, pipe));
2196 	}
2197 }
2198 
2199 /**
2200  * vlv_force_pll_on - forcibly enable just the PLL
2201  * @display: display device
2202  * @pipe: pipe PLL to enable
2203  * @dpll: PLL configuration
2204  *
2205  * Enable the PLL for @pipe using the supplied @dpll config. To be used
2206  * in cases where we need the PLL enabled even when @pipe is not going to
2207  * be enabled.
2208  */
2209 int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
2210 		     const struct dpll *dpll)
2211 {
2212 	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
2213 	struct intel_crtc_state *crtc_state;
2214 
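	/* Build a temporary crtc state just to compute and program the PLL */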
2215 	crtc_state = intel_crtc_state_alloc(crtc);
2216 	if (!crtc_state)
2217 		return -ENOMEM;
2218 
2219 	crtc_state->cpu_transcoder = (enum transcoder)pipe;
2220 	crtc_state->pixel_multiplier = 1;
2221 	crtc_state->dpll = *dpll;
2222 	crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
2223 
2224 	if (display->platform.cherryview) {
2225 		chv_compute_dpll(crtc_state);
2226 		chv_enable_pll(crtc_state);
2227 	} else {
2228 		vlv_compute_dpll(crtc_state);
2229 		vlv_enable_pll(crtc_state);
2230 	}
2231 
2232 	intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
2233 
2234 	return 0;
2235 }
2236 
2237 void vlv_disable_pll(struct intel_display *display, enum pipe pipe)
2238 {
2239 	u32 val;
2240 
2241 	/* Make sure the pipe isn't still relying on us */
2242 	assert_transcoder_disabled(display, (enum transcoder)pipe);
2243 
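	/* Shut off the VCO but leave the reference clock running (and the CRI clock on pipes B/C) */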
2244 	val = DPLL_INTEGRATED_REF_CLK_VLV |
2245 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2246 	if (pipe != PIPE_A)
2247 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2248 
2249 	intel_de_write(display, DPLL(display, pipe), val);
2250 	intel_de_posting_read(display, DPLL(display, pipe));
2251 }
2252 
2253 void chv_disable_pll(struct intel_display *display, enum pipe pipe)
2254 {
2255 	struct drm_i915_private *dev_priv = to_i915(display->drm);
2256 	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
2257 	enum dpio_phy phy = vlv_pipe_to_phy(pipe);
2258 	u32 val;
2259 
2260 	/* Make sure the pipe isn't still relying on us */
2261 	assert_transcoder_disabled(display, (enum transcoder)pipe);
2262 
2263 	val = DPLL_SSC_REF_CLK_CHV |
2264 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2265 	if (pipe != PIPE_A)
2266 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2267 
2268 	intel_de_write(display, DPLL(display, pipe), val);
2269 	intel_de_posting_read(display, DPLL(display, pipe));
2270 
2271 	vlv_dpio_get(dev_priv);
2272 
2273 	/* Disable 10bit clock to display controller */
2274 	val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
2275 	val &= ~DPIO_DCLKP_EN;
2276 	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), val);
2277 
2278 	vlv_dpio_put(dev_priv);
2279 }
2280 
2281 void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
2282 {
2283 	struct intel_display *display = to_intel_display(crtc_state);
2284 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2285 	enum pipe pipe = crtc->pipe;
2286 
2287 	/* Don't disable pipe or pipe PLLs if needed */
2288 	if (display->platform.i830)
2289 		return;
2290 
2291 	/* Make sure the pipe isn't still relying on us */
2292 	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
2293 
2294 	intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS);
2295 	intel_de_posting_read(display, DPLL(display, pipe));
2296 }
2297 
2299 /**
2300  * vlv_force_pll_off - forcibly disable just the PLL
2301  * @display: display device
2302  * @pipe: pipe PLL to disable
2303  *
2304  * Disable the PLL for @pipe. To be used in cases where the PLL was
2305  * previously force enabled with vlv_force_pll_on().
2306  */
2307 void vlv_force_pll_off(struct intel_display *display, enum pipe pipe)
2308 {
2309 	if (display->platform.cherryview)
2310 		chv_disable_pll(display, pipe);
2311 	else
2312 		vlv_disable_pll(display, pipe);
2313 }
2314 
2315 /* Only for pre-ILK configs */
2316 static void assert_pll(struct intel_display *display,
2317 		       enum pipe pipe, bool state)
2318 {
2319 	bool cur_state;
2320 
2321 	cur_state = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
2322 	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
2323 				 "PLL state assertion failure (expected %s, current %s)\n",
2324 				 str_on_off(state), str_on_off(cur_state));
2325 }
2326 
2327 void assert_pll_enabled(struct intel_display *display, enum pipe pipe)
2328 {
2329 	assert_pll(display, pipe, true);
2330 }
2331 
2332 void assert_pll_disabled(struct intel_display *display, enum pipe pipe)
2333 {
2334 	assert_pll(display, pipe, false);
2335 }
2336